repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
fabaff/python-mystrom | pymystrom/switch.py | MyStromPlug.get_temperature | python | def get_temperature(self):
try:
request = requests.get(
'{}/temp'.format(self.resource), timeout=self.timeout, allow_redirects=False)
self.temperature = request.json()['compensated']
return self.temperature
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
except ValueError:
raise exceptions.MyStromNotVersionTwoSwitch() | Get current temperature in celsius. | train | https://github.com/fabaff/python-mystrom/blob/86410f8952104651ef76ad37c84c29740c50551e/pymystrom/switch.py#L77-L87 | null | class MyStromPlug(object):
"""A class for a myStrom switch."""
def __init__(self, host):
"""Initialize the switch."""
self.resource = 'http://{}'.format(host)
self.timeout = 5
self.data = None
self.state = None
self.consumption = 0
self.temperature = 0
def set_relay_on(self):
"""Turn the relay on."""
if not self.get_relay_state():
try:
request = requests.get(
'{}/relay'.format(self.resource), params={'state': '1'},
timeout=self.timeout)
if request.status_code == 200:
self.data['relay'] = True
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
def set_relay_off(self):
"""Turn the relay off."""
if self.get_relay_state():
try:
request = requests.get(
'{}/relay'.format(self.resource), params={'state': '0'},
timeout=self.timeout)
if request.status_code == 200:
self.data['relay'] = False
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
def get_status(self):
"""Get the details from the switch."""
try:
request = requests.get(
'{}/report'.format(self.resource), timeout=self.timeout)
self.data = request.json()
return self.data
except (requests.exceptions.ConnectionError, ValueError):
raise exceptions.MyStromConnectionError()
def get_relay_state(self):
"""Get the relay state."""
self.get_status()
try:
self.state = self.data['relay']
except TypeError:
self.state = False
return bool(self.state)
def get_consumption(self):
"""Get current power consumption in mWh."""
self.get_status()
try:
self.consumption = self.data['power']
except TypeError:
self.consumption = 0
return self.consumption
|
fabaff/python-mystrom | pymystrom/bulb.py | MyStromBulb.get_status | python | def get_status(self):
try:
request = requests.get(
'{}/{}/'.format(self.resource, URI), timeout=self.timeout)
raw_data = request.json()
# Doesn't always work !!!!!
#self._mac = next(iter(self.raw_data))
self.data = raw_data[self._mac]
return self.data
except (requests.exceptions.ConnectionError, ValueError):
raise exceptions.MyStromConnectionError() | Get the details from the bulb. | train | https://github.com/fabaff/python-mystrom/blob/86410f8952104651ef76ad37c84c29740c50551e/pymystrom/bulb.py#L31-L42 | null | class MyStromBulb(object):
"""A class for a myStrom bulb."""
def __init__(self, host, mac):
"""Initialize the bulb."""
self.resource = 'http://{}'.format(host)
self._mac = mac
self.timeout = 5
self.data = None
self.state = None
self.consumption = 0
self.brightness = 0
self.color = None
self.firmware = None
self.mode = None
self.transition_time = 0
def get_bulb_state(self):
"""Get the relay state."""
self.get_status()
try:
self.state = self.data['on']
except TypeError:
self.state = False
return bool(self.state)
def get_power(self):
"""Get current power."""
self.get_status()
try:
self.consumption = self.data['power']
except TypeError:
self.consumption = 0
return self.consumption
def get_firmware(self):
"""Get the current firmware version."""
self.get_status()
try:
self.firmware = self.data['fw_version']
except TypeError:
self.firmware = 'Unknown'
return self.firmware
def get_brightness(self):
"""Get current brightness."""
self.get_status()
try:
self.brightness = self.data['color'].split(';')[-1]
except TypeError:
self.brightness = 0
return self.brightness
def get_transition_time(self):
"""Get the transition time in ms."""
self.get_status()
try:
self.transition_time = self.data['ramp']
except TypeError:
self.transition_time = 0
return self.transition_time
def get_color(self):
"""Get current color."""
self.get_status()
try:
self.color = self.data['color']
self.mode = self.data['mode']
except TypeError:
self.color = 0
self.mode = ''
return {'color': self.color, 'mode': self.mode}
def set_on(self):
"""Turn the bulb on with the previous settings."""
try:
request = requests.post(
'{}/{}/{}/'.format(self.resource, URI, self._mac),
data={'action': 'on'}, timeout=self.timeout)
if request.status_code == 200:
pass
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
def set_color_hex(self, value):
"""Turn the bulb on with the given color as HEX.
white: FF000000
red: 00FF0000
green: 0000FF00
blue: 000000FF
"""
data = {
'action': 'on',
'color': value,
}
try:
request = requests.post(
'{}/{}/{}/'.format(self.resource, URI, self._mac),
json=data, timeout=self.timeout)
if request.status_code == 200:
pass
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
def set_color_hsv(self, hue, saturation, value):
"""Turn the bulb on with the given values as HSV."""
try:
data = "action=on&color={};{};{}".format(hue, saturation, value)
request = requests.post(
'{}/{}/{}'.format(self.resource, URI, self._mac),
data=data, timeout=self.timeout)
if request.status_code == 200:
self.data['on'] = True
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
def set_white(self):
"""Turn the bulb on, full white."""
self.set_color_hsv(0, 0, 100)
def set_rainbow(self, duration):
"""Turn the bulb on and create a rainbow."""
for i in range(0, 359):
self.set_color_hsv(i, 100, 100)
time.sleep(duration/359)
def set_sunrise(self, duration):
"""Turn the bulb on and create a sunrise."""
self.set_transition_time(duration/100)
for i in range(0, duration):
try:
data = "action=on&color=3;{}".format(i)
request = requests.post(
'{}/{}/{}'.format(self.resource, URI, self._mac),
data=data, timeout=self.timeout)
if request.status_code == 200:
self.data['on'] = True
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
time.sleep(duration/100)
def set_flashing(self, duration, hsv1, hsv2):
"""Turn the bulb on, flashing with two colors."""
self.set_transition_time(100)
for step in range(0, int(duration/2)):
self.set_color_hsv(hsv1[0], hsv1[1], hsv1[2])
time.sleep(1)
self.set_color_hsv(hsv2[0], hsv2[1], hsv2[2])
time.sleep(1)
def set_transition_time(self, value):
"""Set the transition time in ms."""
try:
request = requests.post(
'{}/{}/{}/'.format(self.resource, URI, self._mac),
data={'ramp': value}, timeout=self.timeout)
if request.status_code == 200:
pass
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
def set_off(self):
"""Turn the bulb off."""
try:
request = requests.post(
'{}/{}/{}/'.format(self.resource, URI, self._mac),
data={'action': 'off'}, timeout=self.timeout)
if request.status_code == 200:
pass
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
|
fabaff/python-mystrom | pymystrom/bulb.py | MyStromBulb.get_bulb_state | python | def get_bulb_state(self):
self.get_status()
try:
self.state = self.data['on']
except TypeError:
self.state = False
return bool(self.state) | Get the relay state. | train | https://github.com/fabaff/python-mystrom/blob/86410f8952104651ef76ad37c84c29740c50551e/pymystrom/bulb.py#L44-L52 | [
"def get_status(self):\n \"\"\"Get the details from the bulb.\"\"\"\n try:\n request = requests.get(\n '{}/{}/'.format(self.resource, URI), timeout=self.timeout)\n raw_data = request.json()\n # Doesn't always work !!!!!\n #self._mac = next(iter(self.raw_data))\n s... | class MyStromBulb(object):
"""A class for a myStrom bulb."""
def __init__(self, host, mac):
"""Initialize the bulb."""
self.resource = 'http://{}'.format(host)
self._mac = mac
self.timeout = 5
self.data = None
self.state = None
self.consumption = 0
self.brightness = 0
self.color = None
self.firmware = None
self.mode = None
self.transition_time = 0
def get_status(self):
"""Get the details from the bulb."""
try:
request = requests.get(
'{}/{}/'.format(self.resource, URI), timeout=self.timeout)
raw_data = request.json()
# Doesn't always work !!!!!
#self._mac = next(iter(self.raw_data))
self.data = raw_data[self._mac]
return self.data
except (requests.exceptions.ConnectionError, ValueError):
raise exceptions.MyStromConnectionError()
def get_power(self):
"""Get current power."""
self.get_status()
try:
self.consumption = self.data['power']
except TypeError:
self.consumption = 0
return self.consumption
def get_firmware(self):
"""Get the current firmware version."""
self.get_status()
try:
self.firmware = self.data['fw_version']
except TypeError:
self.firmware = 'Unknown'
return self.firmware
def get_brightness(self):
"""Get current brightness."""
self.get_status()
try:
self.brightness = self.data['color'].split(';')[-1]
except TypeError:
self.brightness = 0
return self.brightness
def get_transition_time(self):
"""Get the transition time in ms."""
self.get_status()
try:
self.transition_time = self.data['ramp']
except TypeError:
self.transition_time = 0
return self.transition_time
def get_color(self):
"""Get current color."""
self.get_status()
try:
self.color = self.data['color']
self.mode = self.data['mode']
except TypeError:
self.color = 0
self.mode = ''
return {'color': self.color, 'mode': self.mode}
def set_on(self):
"""Turn the bulb on with the previous settings."""
try:
request = requests.post(
'{}/{}/{}/'.format(self.resource, URI, self._mac),
data={'action': 'on'}, timeout=self.timeout)
if request.status_code == 200:
pass
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
def set_color_hex(self, value):
"""Turn the bulb on with the given color as HEX.
white: FF000000
red: 00FF0000
green: 0000FF00
blue: 000000FF
"""
data = {
'action': 'on',
'color': value,
}
try:
request = requests.post(
'{}/{}/{}/'.format(self.resource, URI, self._mac),
json=data, timeout=self.timeout)
if request.status_code == 200:
pass
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
def set_color_hsv(self, hue, saturation, value):
"""Turn the bulb on with the given values as HSV."""
try:
data = "action=on&color={};{};{}".format(hue, saturation, value)
request = requests.post(
'{}/{}/{}'.format(self.resource, URI, self._mac),
data=data, timeout=self.timeout)
if request.status_code == 200:
self.data['on'] = True
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
def set_white(self):
"""Turn the bulb on, full white."""
self.set_color_hsv(0, 0, 100)
def set_rainbow(self, duration):
"""Turn the bulb on and create a rainbow."""
for i in range(0, 359):
self.set_color_hsv(i, 100, 100)
time.sleep(duration/359)
def set_sunrise(self, duration):
"""Turn the bulb on and create a sunrise."""
self.set_transition_time(duration/100)
for i in range(0, duration):
try:
data = "action=on&color=3;{}".format(i)
request = requests.post(
'{}/{}/{}'.format(self.resource, URI, self._mac),
data=data, timeout=self.timeout)
if request.status_code == 200:
self.data['on'] = True
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
time.sleep(duration/100)
def set_flashing(self, duration, hsv1, hsv2):
"""Turn the bulb on, flashing with two colors."""
self.set_transition_time(100)
for step in range(0, int(duration/2)):
self.set_color_hsv(hsv1[0], hsv1[1], hsv1[2])
time.sleep(1)
self.set_color_hsv(hsv2[0], hsv2[1], hsv2[2])
time.sleep(1)
def set_transition_time(self, value):
"""Set the transition time in ms."""
try:
request = requests.post(
'{}/{}/{}/'.format(self.resource, URI, self._mac),
data={'ramp': value}, timeout=self.timeout)
if request.status_code == 200:
pass
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
def set_off(self):
"""Turn the bulb off."""
try:
request = requests.post(
'{}/{}/{}/'.format(self.resource, URI, self._mac),
data={'action': 'off'}, timeout=self.timeout)
if request.status_code == 200:
pass
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
|
fabaff/python-mystrom | pymystrom/bulb.py | MyStromBulb.get_power | python | def get_power(self):
self.get_status()
try:
self.consumption = self.data['power']
except TypeError:
self.consumption = 0
return self.consumption | Get current power. | train | https://github.com/fabaff/python-mystrom/blob/86410f8952104651ef76ad37c84c29740c50551e/pymystrom/bulb.py#L54-L62 | [
"def get_status(self):\n \"\"\"Get the details from the bulb.\"\"\"\n try:\n request = requests.get(\n '{}/{}/'.format(self.resource, URI), timeout=self.timeout)\n raw_data = request.json()\n # Doesn't always work !!!!!\n #self._mac = next(iter(self.raw_data))\n s... | class MyStromBulb(object):
"""A class for a myStrom bulb."""
def __init__(self, host, mac):
"""Initialize the bulb."""
self.resource = 'http://{}'.format(host)
self._mac = mac
self.timeout = 5
self.data = None
self.state = None
self.consumption = 0
self.brightness = 0
self.color = None
self.firmware = None
self.mode = None
self.transition_time = 0
def get_status(self):
"""Get the details from the bulb."""
try:
request = requests.get(
'{}/{}/'.format(self.resource, URI), timeout=self.timeout)
raw_data = request.json()
# Doesn't always work !!!!!
#self._mac = next(iter(self.raw_data))
self.data = raw_data[self._mac]
return self.data
except (requests.exceptions.ConnectionError, ValueError):
raise exceptions.MyStromConnectionError()
def get_bulb_state(self):
"""Get the relay state."""
self.get_status()
try:
self.state = self.data['on']
except TypeError:
self.state = False
return bool(self.state)
def get_firmware(self):
"""Get the current firmware version."""
self.get_status()
try:
self.firmware = self.data['fw_version']
except TypeError:
self.firmware = 'Unknown'
return self.firmware
def get_brightness(self):
"""Get current brightness."""
self.get_status()
try:
self.brightness = self.data['color'].split(';')[-1]
except TypeError:
self.brightness = 0
return self.brightness
def get_transition_time(self):
"""Get the transition time in ms."""
self.get_status()
try:
self.transition_time = self.data['ramp']
except TypeError:
self.transition_time = 0
return self.transition_time
def get_color(self):
"""Get current color."""
self.get_status()
try:
self.color = self.data['color']
self.mode = self.data['mode']
except TypeError:
self.color = 0
self.mode = ''
return {'color': self.color, 'mode': self.mode}
def set_on(self):
"""Turn the bulb on with the previous settings."""
try:
request = requests.post(
'{}/{}/{}/'.format(self.resource, URI, self._mac),
data={'action': 'on'}, timeout=self.timeout)
if request.status_code == 200:
pass
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
def set_color_hex(self, value):
"""Turn the bulb on with the given color as HEX.
white: FF000000
red: 00FF0000
green: 0000FF00
blue: 000000FF
"""
data = {
'action': 'on',
'color': value,
}
try:
request = requests.post(
'{}/{}/{}/'.format(self.resource, URI, self._mac),
json=data, timeout=self.timeout)
if request.status_code == 200:
pass
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
def set_color_hsv(self, hue, saturation, value):
"""Turn the bulb on with the given values as HSV."""
try:
data = "action=on&color={};{};{}".format(hue, saturation, value)
request = requests.post(
'{}/{}/{}'.format(self.resource, URI, self._mac),
data=data, timeout=self.timeout)
if request.status_code == 200:
self.data['on'] = True
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
def set_white(self):
"""Turn the bulb on, full white."""
self.set_color_hsv(0, 0, 100)
def set_rainbow(self, duration):
"""Turn the bulb on and create a rainbow."""
for i in range(0, 359):
self.set_color_hsv(i, 100, 100)
time.sleep(duration/359)
def set_sunrise(self, duration):
"""Turn the bulb on and create a sunrise."""
self.set_transition_time(duration/100)
for i in range(0, duration):
try:
data = "action=on&color=3;{}".format(i)
request = requests.post(
'{}/{}/{}'.format(self.resource, URI, self._mac),
data=data, timeout=self.timeout)
if request.status_code == 200:
self.data['on'] = True
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
time.sleep(duration/100)
def set_flashing(self, duration, hsv1, hsv2):
"""Turn the bulb on, flashing with two colors."""
self.set_transition_time(100)
for step in range(0, int(duration/2)):
self.set_color_hsv(hsv1[0], hsv1[1], hsv1[2])
time.sleep(1)
self.set_color_hsv(hsv2[0], hsv2[1], hsv2[2])
time.sleep(1)
def set_transition_time(self, value):
"""Set the transition time in ms."""
try:
request = requests.post(
'{}/{}/{}/'.format(self.resource, URI, self._mac),
data={'ramp': value}, timeout=self.timeout)
if request.status_code == 200:
pass
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
def set_off(self):
"""Turn the bulb off."""
try:
request = requests.post(
'{}/{}/{}/'.format(self.resource, URI, self._mac),
data={'action': 'off'}, timeout=self.timeout)
if request.status_code == 200:
pass
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
|
fabaff/python-mystrom | pymystrom/bulb.py | MyStromBulb.get_firmware | python | def get_firmware(self):
self.get_status()
try:
self.firmware = self.data['fw_version']
except TypeError:
self.firmware = 'Unknown'
return self.firmware | Get the current firmware version. | train | https://github.com/fabaff/python-mystrom/blob/86410f8952104651ef76ad37c84c29740c50551e/pymystrom/bulb.py#L64-L72 | [
"def get_status(self):\n \"\"\"Get the details from the bulb.\"\"\"\n try:\n request = requests.get(\n '{}/{}/'.format(self.resource, URI), timeout=self.timeout)\n raw_data = request.json()\n # Doesn't always work !!!!!\n #self._mac = next(iter(self.raw_data))\n s... | class MyStromBulb(object):
"""A class for a myStrom bulb."""
def __init__(self, host, mac):
"""Initialize the bulb."""
self.resource = 'http://{}'.format(host)
self._mac = mac
self.timeout = 5
self.data = None
self.state = None
self.consumption = 0
self.brightness = 0
self.color = None
self.firmware = None
self.mode = None
self.transition_time = 0
def get_status(self):
"""Get the details from the bulb."""
try:
request = requests.get(
'{}/{}/'.format(self.resource, URI), timeout=self.timeout)
raw_data = request.json()
# Doesn't always work !!!!!
#self._mac = next(iter(self.raw_data))
self.data = raw_data[self._mac]
return self.data
except (requests.exceptions.ConnectionError, ValueError):
raise exceptions.MyStromConnectionError()
def get_bulb_state(self):
"""Get the relay state."""
self.get_status()
try:
self.state = self.data['on']
except TypeError:
self.state = False
return bool(self.state)
def get_power(self):
"""Get current power."""
self.get_status()
try:
self.consumption = self.data['power']
except TypeError:
self.consumption = 0
return self.consumption
def get_brightness(self):
"""Get current brightness."""
self.get_status()
try:
self.brightness = self.data['color'].split(';')[-1]
except TypeError:
self.brightness = 0
return self.brightness
def get_transition_time(self):
"""Get the transition time in ms."""
self.get_status()
try:
self.transition_time = self.data['ramp']
except TypeError:
self.transition_time = 0
return self.transition_time
def get_color(self):
"""Get current color."""
self.get_status()
try:
self.color = self.data['color']
self.mode = self.data['mode']
except TypeError:
self.color = 0
self.mode = ''
return {'color': self.color, 'mode': self.mode}
def set_on(self):
"""Turn the bulb on with the previous settings."""
try:
request = requests.post(
'{}/{}/{}/'.format(self.resource, URI, self._mac),
data={'action': 'on'}, timeout=self.timeout)
if request.status_code == 200:
pass
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
def set_color_hex(self, value):
"""Turn the bulb on with the given color as HEX.
white: FF000000
red: 00FF0000
green: 0000FF00
blue: 000000FF
"""
data = {
'action': 'on',
'color': value,
}
try:
request = requests.post(
'{}/{}/{}/'.format(self.resource, URI, self._mac),
json=data, timeout=self.timeout)
if request.status_code == 200:
pass
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
def set_color_hsv(self, hue, saturation, value):
"""Turn the bulb on with the given values as HSV."""
try:
data = "action=on&color={};{};{}".format(hue, saturation, value)
request = requests.post(
'{}/{}/{}'.format(self.resource, URI, self._mac),
data=data, timeout=self.timeout)
if request.status_code == 200:
self.data['on'] = True
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
def set_white(self):
"""Turn the bulb on, full white."""
self.set_color_hsv(0, 0, 100)
def set_rainbow(self, duration):
"""Turn the bulb on and create a rainbow."""
for i in range(0, 359):
self.set_color_hsv(i, 100, 100)
time.sleep(duration/359)
def set_sunrise(self, duration):
"""Turn the bulb on and create a sunrise."""
self.set_transition_time(duration/100)
for i in range(0, duration):
try:
data = "action=on&color=3;{}".format(i)
request = requests.post(
'{}/{}/{}'.format(self.resource, URI, self._mac),
data=data, timeout=self.timeout)
if request.status_code == 200:
self.data['on'] = True
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
time.sleep(duration/100)
def set_flashing(self, duration, hsv1, hsv2):
"""Turn the bulb on, flashing with two colors."""
self.set_transition_time(100)
for step in range(0, int(duration/2)):
self.set_color_hsv(hsv1[0], hsv1[1], hsv1[2])
time.sleep(1)
self.set_color_hsv(hsv2[0], hsv2[1], hsv2[2])
time.sleep(1)
def set_transition_time(self, value):
"""Set the transition time in ms."""
try:
request = requests.post(
'{}/{}/{}/'.format(self.resource, URI, self._mac),
data={'ramp': value}, timeout=self.timeout)
if request.status_code == 200:
pass
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
def set_off(self):
"""Turn the bulb off."""
try:
request = requests.post(
'{}/{}/{}/'.format(self.resource, URI, self._mac),
data={'action': 'off'}, timeout=self.timeout)
if request.status_code == 200:
pass
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
|
fabaff/python-mystrom | pymystrom/bulb.py | MyStromBulb.get_brightness | python | def get_brightness(self):
self.get_status()
try:
self.brightness = self.data['color'].split(';')[-1]
except TypeError:
self.brightness = 0
return self.brightness | Get current brightness. | train | https://github.com/fabaff/python-mystrom/blob/86410f8952104651ef76ad37c84c29740c50551e/pymystrom/bulb.py#L74-L82 | [
"def get_status(self):\n \"\"\"Get the details from the bulb.\"\"\"\n try:\n request = requests.get(\n '{}/{}/'.format(self.resource, URI), timeout=self.timeout)\n raw_data = request.json()\n # Doesn't always work !!!!!\n #self._mac = next(iter(self.raw_data))\n s... | class MyStromBulb(object):
"""A class for a myStrom bulb."""
def __init__(self, host, mac):
"""Initialize the bulb."""
self.resource = 'http://{}'.format(host)
self._mac = mac
self.timeout = 5
self.data = None
self.state = None
self.consumption = 0
self.brightness = 0
self.color = None
self.firmware = None
self.mode = None
self.transition_time = 0
def get_status(self):
"""Get the details from the bulb."""
try:
request = requests.get(
'{}/{}/'.format(self.resource, URI), timeout=self.timeout)
raw_data = request.json()
# Doesn't always work !!!!!
#self._mac = next(iter(self.raw_data))
self.data = raw_data[self._mac]
return self.data
except (requests.exceptions.ConnectionError, ValueError):
raise exceptions.MyStromConnectionError()
def get_bulb_state(self):
"""Get the relay state."""
self.get_status()
try:
self.state = self.data['on']
except TypeError:
self.state = False
return bool(self.state)
def get_power(self):
"""Get current power."""
self.get_status()
try:
self.consumption = self.data['power']
except TypeError:
self.consumption = 0
return self.consumption
def get_firmware(self):
"""Get the current firmware version."""
self.get_status()
try:
self.firmware = self.data['fw_version']
except TypeError:
self.firmware = 'Unknown'
return self.firmware
def get_transition_time(self):
"""Get the transition time in ms."""
self.get_status()
try:
self.transition_time = self.data['ramp']
except TypeError:
self.transition_time = 0
return self.transition_time
def get_color(self):
"""Get current color."""
self.get_status()
try:
self.color = self.data['color']
self.mode = self.data['mode']
except TypeError:
self.color = 0
self.mode = ''
return {'color': self.color, 'mode': self.mode}
def set_on(self):
"""Turn the bulb on with the previous settings."""
try:
request = requests.post(
'{}/{}/{}/'.format(self.resource, URI, self._mac),
data={'action': 'on'}, timeout=self.timeout)
if request.status_code == 200:
pass
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
def set_color_hex(self, value):
"""Turn the bulb on with the given color as HEX.
white: FF000000
red: 00FF0000
green: 0000FF00
blue: 000000FF
"""
data = {
'action': 'on',
'color': value,
}
try:
request = requests.post(
'{}/{}/{}/'.format(self.resource, URI, self._mac),
json=data, timeout=self.timeout)
if request.status_code == 200:
pass
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
def set_color_hsv(self, hue, saturation, value):
"""Turn the bulb on with the given values as HSV."""
try:
data = "action=on&color={};{};{}".format(hue, saturation, value)
request = requests.post(
'{}/{}/{}'.format(self.resource, URI, self._mac),
data=data, timeout=self.timeout)
if request.status_code == 200:
self.data['on'] = True
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
def set_white(self):
"""Turn the bulb on, full white."""
self.set_color_hsv(0, 0, 100)
def set_rainbow(self, duration):
"""Turn the bulb on and create a rainbow."""
for i in range(0, 359):
self.set_color_hsv(i, 100, 100)
time.sleep(duration/359)
def set_sunrise(self, duration):
"""Turn the bulb on and create a sunrise."""
self.set_transition_time(duration/100)
for i in range(0, duration):
try:
data = "action=on&color=3;{}".format(i)
request = requests.post(
'{}/{}/{}'.format(self.resource, URI, self._mac),
data=data, timeout=self.timeout)
if request.status_code == 200:
self.data['on'] = True
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
time.sleep(duration/100)
def set_flashing(self, duration, hsv1, hsv2):
"""Turn the bulb on, flashing with two colors."""
self.set_transition_time(100)
for step in range(0, int(duration/2)):
self.set_color_hsv(hsv1[0], hsv1[1], hsv1[2])
time.sleep(1)
self.set_color_hsv(hsv2[0], hsv2[1], hsv2[2])
time.sleep(1)
def set_transition_time(self, value):
"""Set the transition time in ms."""
try:
request = requests.post(
'{}/{}/{}/'.format(self.resource, URI, self._mac),
data={'ramp': value}, timeout=self.timeout)
if request.status_code == 200:
pass
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
def set_off(self):
"""Turn the bulb off."""
try:
request = requests.post(
'{}/{}/{}/'.format(self.resource, URI, self._mac),
data={'action': 'off'}, timeout=self.timeout)
if request.status_code == 200:
pass
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
|
fabaff/python-mystrom | pymystrom/bulb.py | MyStromBulb.get_transition_time | python | def get_transition_time(self):
self.get_status()
try:
self.transition_time = self.data['ramp']
except TypeError:
self.transition_time = 0
return self.transition_time | Get the transition time in ms. | train | https://github.com/fabaff/python-mystrom/blob/86410f8952104651ef76ad37c84c29740c50551e/pymystrom/bulb.py#L84-L92 | [
"def get_status(self):\n \"\"\"Get the details from the bulb.\"\"\"\n try:\n request = requests.get(\n '{}/{}/'.format(self.resource, URI), timeout=self.timeout)\n raw_data = request.json()\n # Doesn't always work !!!!!\n #self._mac = next(iter(self.raw_data))\n s... | class MyStromBulb(object):
"""A class for a myStrom bulb."""
def __init__(self, host, mac):
"""Initialize the bulb."""
self.resource = 'http://{}'.format(host)
self._mac = mac
self.timeout = 5
self.data = None
self.state = None
self.consumption = 0
self.brightness = 0
self.color = None
self.firmware = None
self.mode = None
self.transition_time = 0
def get_status(self):
"""Get the details from the bulb."""
try:
request = requests.get(
'{}/{}/'.format(self.resource, URI), timeout=self.timeout)
raw_data = request.json()
# Doesn't always work !!!!!
#self._mac = next(iter(self.raw_data))
self.data = raw_data[self._mac]
return self.data
except (requests.exceptions.ConnectionError, ValueError):
raise exceptions.MyStromConnectionError()
def get_bulb_state(self):
"""Get the relay state."""
self.get_status()
try:
self.state = self.data['on']
except TypeError:
self.state = False
return bool(self.state)
def get_power(self):
"""Get current power."""
self.get_status()
try:
self.consumption = self.data['power']
except TypeError:
self.consumption = 0
return self.consumption
def get_firmware(self):
"""Get the current firmware version."""
self.get_status()
try:
self.firmware = self.data['fw_version']
except TypeError:
self.firmware = 'Unknown'
return self.firmware
def get_brightness(self):
"""Get current brightness."""
self.get_status()
try:
self.brightness = self.data['color'].split(';')[-1]
except TypeError:
self.brightness = 0
return self.brightness
def get_color(self):
"""Get current color."""
self.get_status()
try:
self.color = self.data['color']
self.mode = self.data['mode']
except TypeError:
self.color = 0
self.mode = ''
return {'color': self.color, 'mode': self.mode}
def set_on(self):
"""Turn the bulb on with the previous settings."""
try:
request = requests.post(
'{}/{}/{}/'.format(self.resource, URI, self._mac),
data={'action': 'on'}, timeout=self.timeout)
if request.status_code == 200:
pass
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
def set_color_hex(self, value):
"""Turn the bulb on with the given color as HEX.
white: FF000000
red: 00FF0000
green: 0000FF00
blue: 000000FF
"""
data = {
'action': 'on',
'color': value,
}
try:
request = requests.post(
'{}/{}/{}/'.format(self.resource, URI, self._mac),
json=data, timeout=self.timeout)
if request.status_code == 200:
pass
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
def set_color_hsv(self, hue, saturation, value):
"""Turn the bulb on with the given values as HSV."""
try:
data = "action=on&color={};{};{}".format(hue, saturation, value)
request = requests.post(
'{}/{}/{}'.format(self.resource, URI, self._mac),
data=data, timeout=self.timeout)
if request.status_code == 200:
self.data['on'] = True
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
def set_white(self):
"""Turn the bulb on, full white."""
self.set_color_hsv(0, 0, 100)
def set_rainbow(self, duration):
"""Turn the bulb on and create a rainbow."""
for i in range(0, 359):
self.set_color_hsv(i, 100, 100)
time.sleep(duration/359)
def set_sunrise(self, duration):
    """Ramp the bulb from dark to full brightness over ``duration`` seconds.

    The previous implementation looped ``duration`` times while sleeping
    ``duration/100`` seconds per step, so the total ramp took
    duration²/100 seconds and the brightness only reached ``duration``
    percent. The ramp now always covers the 0-100 brightness scale.

    Raises:
        MyStromConnectionError: if the bulb cannot be reached.
    """
    steps = 100  # brightness ramp from 0 to 100 percent
    step_time = duration / steps
    self.set_transition_time(step_time)
    for level in range(0, steps + 1):
        try:
            # "3;<level>" appears to select mono mode at the given
            # brightness level — TODO confirm against device API.
            payload = "action=on&color=3;{}".format(level)
            response = requests.post(
                '{}/{}/{}'.format(self.resource, URI, self._mac),
                data=payload, timeout=self.timeout)
            # Guard against self.data still being None (no prior status).
            if response.status_code == 200 and self.data is not None:
                self.data['on'] = True
        except requests.exceptions.ConnectionError:
            raise exceptions.MyStromConnectionError()
        time.sleep(step_time)
def set_flashing(self, duration, hsv1, hsv2):
    """Alternate the bulb between two HSV colors for ``duration`` seconds.

    Each cycle shows ``hsv1`` for one second, then ``hsv2`` for one
    second, so ``duration // 2`` cycles cover the requested time.
    """
    self.set_transition_time(100)
    for _ in range(int(duration / 2)):
        for color in (hsv1, hsv2):
            self.set_color_hsv(color[0], color[1], color[2])
            time.sleep(1)
def set_transition_time(self, value):
    """Set the transition time in ms for color changes.

    Raises:
        MyStromConnectionError: if the bulb cannot be reached.
    """
    try:
        # The response payload is not used; reaching the device is enough,
        # so the former no-op status-code check was removed.
        requests.post(
            '{}/{}/{}/'.format(self.resource, URI, self._mac),
            data={'ramp': value}, timeout=self.timeout)
    except requests.exceptions.ConnectionError:
        raise exceptions.MyStromConnectionError()
def set_off(self):
    """Turn the bulb off.

    Raises:
        MyStromConnectionError: if the bulb cannot be reached.
    """
    try:
        # The response payload is not used; reaching the device is enough,
        # so the former no-op status-code check was removed.
        requests.post(
            '{}/{}/{}/'.format(self.resource, URI, self._mac),
            data={'action': 'off'}, timeout=self.timeout)
    except requests.exceptions.ConnectionError:
        raise exceptions.MyStromConnectionError()
|
fabaff/python-mystrom | pymystrom/bulb.py | MyStromBulb.get_color | python | def get_color(self):
self.get_status()
try:
self.color = self.data['color']
self.mode = self.data['mode']
except TypeError:
self.color = 0
self.mode = ''
return {'color': self.color, 'mode': self.mode} | Get current color. | train | https://github.com/fabaff/python-mystrom/blob/86410f8952104651ef76ad37c84c29740c50551e/pymystrom/bulb.py#L94-L104 | [
"def get_status(self):\n \"\"\"Get the details from the bulb.\"\"\"\n try:\n request = requests.get(\n '{}/{}/'.format(self.resource, URI), timeout=self.timeout)\n raw_data = request.json()\n # Doesn't always work !!!!!\n #self._mac = next(iter(self.raw_data))\n s... | class MyStromBulb(object):
"""A class for a myStrom bulb."""
def __init__(self, host, mac):
"""Initialize the bulb."""
self.resource = 'http://{}'.format(host)
self._mac = mac
self.timeout = 5
self.data = None
self.state = None
self.consumption = 0
self.brightness = 0
self.color = None
self.firmware = None
self.mode = None
self.transition_time = 0
def get_status(self):
"""Get the details from the bulb."""
try:
request = requests.get(
'{}/{}/'.format(self.resource, URI), timeout=self.timeout)
raw_data = request.json()
# Doesn't always work !!!!!
#self._mac = next(iter(self.raw_data))
self.data = raw_data[self._mac]
return self.data
except (requests.exceptions.ConnectionError, ValueError):
raise exceptions.MyStromConnectionError()
def get_bulb_state(self):
"""Get the relay state."""
self.get_status()
try:
self.state = self.data['on']
except TypeError:
self.state = False
return bool(self.state)
def get_power(self):
"""Get current power."""
self.get_status()
try:
self.consumption = self.data['power']
except TypeError:
self.consumption = 0
return self.consumption
def get_firmware(self):
"""Get the current firmware version."""
self.get_status()
try:
self.firmware = self.data['fw_version']
except TypeError:
self.firmware = 'Unknown'
return self.firmware
def get_brightness(self):
"""Get current brightness."""
self.get_status()
try:
self.brightness = self.data['color'].split(';')[-1]
except TypeError:
self.brightness = 0
return self.brightness
def get_transition_time(self):
"""Get the transition time in ms."""
self.get_status()
try:
self.transition_time = self.data['ramp']
except TypeError:
self.transition_time = 0
return self.transition_time
def set_on(self):
"""Turn the bulb on with the previous settings."""
try:
request = requests.post(
'{}/{}/{}/'.format(self.resource, URI, self._mac),
data={'action': 'on'}, timeout=self.timeout)
if request.status_code == 200:
pass
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
def set_color_hex(self, value):
"""Turn the bulb on with the given color as HEX.
white: FF000000
red: 00FF0000
green: 0000FF00
blue: 000000FF
"""
data = {
'action': 'on',
'color': value,
}
try:
request = requests.post(
'{}/{}/{}/'.format(self.resource, URI, self._mac),
json=data, timeout=self.timeout)
if request.status_code == 200:
pass
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
def set_color_hsv(self, hue, saturation, value):
"""Turn the bulb on with the given values as HSV."""
try:
data = "action=on&color={};{};{}".format(hue, saturation, value)
request = requests.post(
'{}/{}/{}'.format(self.resource, URI, self._mac),
data=data, timeout=self.timeout)
if request.status_code == 200:
self.data['on'] = True
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
def set_white(self):
"""Turn the bulb on, full white."""
self.set_color_hsv(0, 0, 100)
def set_rainbow(self, duration):
"""Turn the bulb on and create a rainbow."""
for i in range(0, 359):
self.set_color_hsv(i, 100, 100)
time.sleep(duration/359)
def set_sunrise(self, duration):
"""Turn the bulb on and create a sunrise."""
self.set_transition_time(duration/100)
for i in range(0, duration):
try:
data = "action=on&color=3;{}".format(i)
request = requests.post(
'{}/{}/{}'.format(self.resource, URI, self._mac),
data=data, timeout=self.timeout)
if request.status_code == 200:
self.data['on'] = True
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
time.sleep(duration/100)
def set_flashing(self, duration, hsv1, hsv2):
"""Turn the bulb on, flashing with two colors."""
self.set_transition_time(100)
for step in range(0, int(duration/2)):
self.set_color_hsv(hsv1[0], hsv1[1], hsv1[2])
time.sleep(1)
self.set_color_hsv(hsv2[0], hsv2[1], hsv2[2])
time.sleep(1)
def set_transition_time(self, value):
"""Set the transition time in ms."""
try:
request = requests.post(
'{}/{}/{}/'.format(self.resource, URI, self._mac),
data={'ramp': value}, timeout=self.timeout)
if request.status_code == 200:
pass
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
def set_off(self):
"""Turn the bulb off."""
try:
request = requests.post(
'{}/{}/{}/'.format(self.resource, URI, self._mac),
data={'action': 'off'}, timeout=self.timeout)
if request.status_code == 200:
pass
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
|
fabaff/python-mystrom | pymystrom/bulb.py | MyStromBulb.set_color_hsv | python | def set_color_hsv(self, hue, saturation, value):
try:
data = "action=on&color={};{};{}".format(hue, saturation, value)
request = requests.post(
'{}/{}/{}'.format(self.resource, URI, self._mac),
data=data, timeout=self.timeout)
if request.status_code == 200:
self.data['on'] = True
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError() | Turn the bulb on with the given values as HSV. | train | https://github.com/fabaff/python-mystrom/blob/86410f8952104651ef76ad37c84c29740c50551e/pymystrom/bulb.py#L139-L149 | null | class MyStromBulb(object):
"""A class for a myStrom bulb."""
def __init__(self, host, mac):
"""Initialize the bulb."""
self.resource = 'http://{}'.format(host)
self._mac = mac
self.timeout = 5
self.data = None
self.state = None
self.consumption = 0
self.brightness = 0
self.color = None
self.firmware = None
self.mode = None
self.transition_time = 0
def get_status(self):
"""Get the details from the bulb."""
try:
request = requests.get(
'{}/{}/'.format(self.resource, URI), timeout=self.timeout)
raw_data = request.json()
# Doesn't always work !!!!!
#self._mac = next(iter(self.raw_data))
self.data = raw_data[self._mac]
return self.data
except (requests.exceptions.ConnectionError, ValueError):
raise exceptions.MyStromConnectionError()
def get_bulb_state(self):
"""Get the relay state."""
self.get_status()
try:
self.state = self.data['on']
except TypeError:
self.state = False
return bool(self.state)
def get_power(self):
"""Get current power."""
self.get_status()
try:
self.consumption = self.data['power']
except TypeError:
self.consumption = 0
return self.consumption
def get_firmware(self):
"""Get the current firmware version."""
self.get_status()
try:
self.firmware = self.data['fw_version']
except TypeError:
self.firmware = 'Unknown'
return self.firmware
def get_brightness(self):
"""Get current brightness."""
self.get_status()
try:
self.brightness = self.data['color'].split(';')[-1]
except TypeError:
self.brightness = 0
return self.brightness
def get_transition_time(self):
"""Get the transition time in ms."""
self.get_status()
try:
self.transition_time = self.data['ramp']
except TypeError:
self.transition_time = 0
return self.transition_time
def get_color(self):
"""Get current color."""
self.get_status()
try:
self.color = self.data['color']
self.mode = self.data['mode']
except TypeError:
self.color = 0
self.mode = ''
return {'color': self.color, 'mode': self.mode}
def set_on(self):
"""Turn the bulb on with the previous settings."""
try:
request = requests.post(
'{}/{}/{}/'.format(self.resource, URI, self._mac),
data={'action': 'on'}, timeout=self.timeout)
if request.status_code == 200:
pass
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
def set_color_hex(self, value):
"""Turn the bulb on with the given color as HEX.
white: FF000000
red: 00FF0000
green: 0000FF00
blue: 000000FF
"""
data = {
'action': 'on',
'color': value,
}
try:
request = requests.post(
'{}/{}/{}/'.format(self.resource, URI, self._mac),
json=data, timeout=self.timeout)
if request.status_code == 200:
pass
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
def set_white(self):
"""Turn the bulb on, full white."""
self.set_color_hsv(0, 0, 100)
def set_rainbow(self, duration):
"""Turn the bulb on and create a rainbow."""
for i in range(0, 359):
self.set_color_hsv(i, 100, 100)
time.sleep(duration/359)
def set_sunrise(self, duration):
"""Turn the bulb on and create a sunrise."""
self.set_transition_time(duration/100)
for i in range(0, duration):
try:
data = "action=on&color=3;{}".format(i)
request = requests.post(
'{}/{}/{}'.format(self.resource, URI, self._mac),
data=data, timeout=self.timeout)
if request.status_code == 200:
self.data['on'] = True
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
time.sleep(duration/100)
def set_flashing(self, duration, hsv1, hsv2):
"""Turn the bulb on, flashing with two colors."""
self.set_transition_time(100)
for step in range(0, int(duration/2)):
self.set_color_hsv(hsv1[0], hsv1[1], hsv1[2])
time.sleep(1)
self.set_color_hsv(hsv2[0], hsv2[1], hsv2[2])
time.sleep(1)
def set_transition_time(self, value):
"""Set the transition time in ms."""
try:
request = requests.post(
'{}/{}/{}/'.format(self.resource, URI, self._mac),
data={'ramp': value}, timeout=self.timeout)
if request.status_code == 200:
pass
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
def set_off(self):
"""Turn the bulb off."""
try:
request = requests.post(
'{}/{}/{}/'.format(self.resource, URI, self._mac),
data={'action': 'off'}, timeout=self.timeout)
if request.status_code == 200:
pass
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
|
fabaff/python-mystrom | pymystrom/bulb.py | MyStromBulb.set_rainbow | python | def set_rainbow(self, duration):
for i in range(0, 359):
self.set_color_hsv(i, 100, 100)
time.sleep(duration/359) | Turn the bulb on and create a rainbow. | train | https://github.com/fabaff/python-mystrom/blob/86410f8952104651ef76ad37c84c29740c50551e/pymystrom/bulb.py#L155-L159 | [
"def set_color_hsv(self, hue, saturation, value):\n \"\"\"Turn the bulb on with the given values as HSV.\"\"\"\n try:\n data = \"action=on&color={};{};{}\".format(hue, saturation, value)\n request = requests.post(\n '{}/{}/{}'.format(self.resource, URI, self._mac),\n data=d... | class MyStromBulb(object):
"""A class for a myStrom bulb."""
def __init__(self, host, mac):
"""Initialize the bulb."""
self.resource = 'http://{}'.format(host)
self._mac = mac
self.timeout = 5
self.data = None
self.state = None
self.consumption = 0
self.brightness = 0
self.color = None
self.firmware = None
self.mode = None
self.transition_time = 0
def get_status(self):
"""Get the details from the bulb."""
try:
request = requests.get(
'{}/{}/'.format(self.resource, URI), timeout=self.timeout)
raw_data = request.json()
# Doesn't always work !!!!!
#self._mac = next(iter(self.raw_data))
self.data = raw_data[self._mac]
return self.data
except (requests.exceptions.ConnectionError, ValueError):
raise exceptions.MyStromConnectionError()
def get_bulb_state(self):
"""Get the relay state."""
self.get_status()
try:
self.state = self.data['on']
except TypeError:
self.state = False
return bool(self.state)
def get_power(self):
"""Get current power."""
self.get_status()
try:
self.consumption = self.data['power']
except TypeError:
self.consumption = 0
return self.consumption
def get_firmware(self):
"""Get the current firmware version."""
self.get_status()
try:
self.firmware = self.data['fw_version']
except TypeError:
self.firmware = 'Unknown'
return self.firmware
def get_brightness(self):
"""Get current brightness."""
self.get_status()
try:
self.brightness = self.data['color'].split(';')[-1]
except TypeError:
self.brightness = 0
return self.brightness
def get_transition_time(self):
"""Get the transition time in ms."""
self.get_status()
try:
self.transition_time = self.data['ramp']
except TypeError:
self.transition_time = 0
return self.transition_time
def get_color(self):
"""Get current color."""
self.get_status()
try:
self.color = self.data['color']
self.mode = self.data['mode']
except TypeError:
self.color = 0
self.mode = ''
return {'color': self.color, 'mode': self.mode}
def set_on(self):
"""Turn the bulb on with the previous settings."""
try:
request = requests.post(
'{}/{}/{}/'.format(self.resource, URI, self._mac),
data={'action': 'on'}, timeout=self.timeout)
if request.status_code == 200:
pass
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
def set_color_hex(self, value):
"""Turn the bulb on with the given color as HEX.
white: FF000000
red: 00FF0000
green: 0000FF00
blue: 000000FF
"""
data = {
'action': 'on',
'color': value,
}
try:
request = requests.post(
'{}/{}/{}/'.format(self.resource, URI, self._mac),
json=data, timeout=self.timeout)
if request.status_code == 200:
pass
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
def set_color_hsv(self, hue, saturation, value):
"""Turn the bulb on with the given values as HSV."""
try:
data = "action=on&color={};{};{}".format(hue, saturation, value)
request = requests.post(
'{}/{}/{}'.format(self.resource, URI, self._mac),
data=data, timeout=self.timeout)
if request.status_code == 200:
self.data['on'] = True
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
def set_white(self):
"""Turn the bulb on, full white."""
self.set_color_hsv(0, 0, 100)
def set_sunrise(self, duration):
"""Turn the bulb on and create a sunrise."""
self.set_transition_time(duration/100)
for i in range(0, duration):
try:
data = "action=on&color=3;{}".format(i)
request = requests.post(
'{}/{}/{}'.format(self.resource, URI, self._mac),
data=data, timeout=self.timeout)
if request.status_code == 200:
self.data['on'] = True
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
time.sleep(duration/100)
def set_flashing(self, duration, hsv1, hsv2):
"""Turn the bulb on, flashing with two colors."""
self.set_transition_time(100)
for step in range(0, int(duration/2)):
self.set_color_hsv(hsv1[0], hsv1[1], hsv1[2])
time.sleep(1)
self.set_color_hsv(hsv2[0], hsv2[1], hsv2[2])
time.sleep(1)
def set_transition_time(self, value):
"""Set the transition time in ms."""
try:
request = requests.post(
'{}/{}/{}/'.format(self.resource, URI, self._mac),
data={'ramp': value}, timeout=self.timeout)
if request.status_code == 200:
pass
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
def set_off(self):
"""Turn the bulb off."""
try:
request = requests.post(
'{}/{}/{}/'.format(self.resource, URI, self._mac),
data={'action': 'off'}, timeout=self.timeout)
if request.status_code == 200:
pass
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
|
fabaff/python-mystrom | pymystrom/bulb.py | MyStromBulb.set_sunrise | python | def set_sunrise(self, duration):
self.set_transition_time(duration/100)
for i in range(0, duration):
try:
data = "action=on&color=3;{}".format(i)
request = requests.post(
'{}/{}/{}'.format(self.resource, URI, self._mac),
data=data, timeout=self.timeout)
if request.status_code == 200:
self.data['on'] = True
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
time.sleep(duration/100) | Turn the bulb on and create a sunrise. | train | https://github.com/fabaff/python-mystrom/blob/86410f8952104651ef76ad37c84c29740c50551e/pymystrom/bulb.py#L161-L174 | [
"def set_transition_time(self, value):\n \"\"\"Set the transition time in ms.\"\"\"\n try:\n request = requests.post(\n '{}/{}/{}/'.format(self.resource, URI, self._mac),\n data={'ramp': value}, timeout=self.timeout)\n if request.status_code == 200:\n pass\n e... | class MyStromBulb(object):
"""A class for a myStrom bulb."""
def __init__(self, host, mac):
"""Initialize the bulb."""
self.resource = 'http://{}'.format(host)
self._mac = mac
self.timeout = 5
self.data = None
self.state = None
self.consumption = 0
self.brightness = 0
self.color = None
self.firmware = None
self.mode = None
self.transition_time = 0
def get_status(self):
"""Get the details from the bulb."""
try:
request = requests.get(
'{}/{}/'.format(self.resource, URI), timeout=self.timeout)
raw_data = request.json()
# Doesn't always work !!!!!
#self._mac = next(iter(self.raw_data))
self.data = raw_data[self._mac]
return self.data
except (requests.exceptions.ConnectionError, ValueError):
raise exceptions.MyStromConnectionError()
def get_bulb_state(self):
"""Get the relay state."""
self.get_status()
try:
self.state = self.data['on']
except TypeError:
self.state = False
return bool(self.state)
def get_power(self):
"""Get current power."""
self.get_status()
try:
self.consumption = self.data['power']
except TypeError:
self.consumption = 0
return self.consumption
def get_firmware(self):
"""Get the current firmware version."""
self.get_status()
try:
self.firmware = self.data['fw_version']
except TypeError:
self.firmware = 'Unknown'
return self.firmware
def get_brightness(self):
"""Get current brightness."""
self.get_status()
try:
self.brightness = self.data['color'].split(';')[-1]
except TypeError:
self.brightness = 0
return self.brightness
def get_transition_time(self):
"""Get the transition time in ms."""
self.get_status()
try:
self.transition_time = self.data['ramp']
except TypeError:
self.transition_time = 0
return self.transition_time
def get_color(self):
"""Get current color."""
self.get_status()
try:
self.color = self.data['color']
self.mode = self.data['mode']
except TypeError:
self.color = 0
self.mode = ''
return {'color': self.color, 'mode': self.mode}
def set_on(self):
"""Turn the bulb on with the previous settings."""
try:
request = requests.post(
'{}/{}/{}/'.format(self.resource, URI, self._mac),
data={'action': 'on'}, timeout=self.timeout)
if request.status_code == 200:
pass
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
def set_color_hex(self, value):
"""Turn the bulb on with the given color as HEX.
white: FF000000
red: 00FF0000
green: 0000FF00
blue: 000000FF
"""
data = {
'action': 'on',
'color': value,
}
try:
request = requests.post(
'{}/{}/{}/'.format(self.resource, URI, self._mac),
json=data, timeout=self.timeout)
if request.status_code == 200:
pass
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
def set_color_hsv(self, hue, saturation, value):
"""Turn the bulb on with the given values as HSV."""
try:
data = "action=on&color={};{};{}".format(hue, saturation, value)
request = requests.post(
'{}/{}/{}'.format(self.resource, URI, self._mac),
data=data, timeout=self.timeout)
if request.status_code == 200:
self.data['on'] = True
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
def set_white(self):
"""Turn the bulb on, full white."""
self.set_color_hsv(0, 0, 100)
def set_rainbow(self, duration):
"""Turn the bulb on and create a rainbow."""
for i in range(0, 359):
self.set_color_hsv(i, 100, 100)
time.sleep(duration/359)
def set_flashing(self, duration, hsv1, hsv2):
"""Turn the bulb on, flashing with two colors."""
self.set_transition_time(100)
for step in range(0, int(duration/2)):
self.set_color_hsv(hsv1[0], hsv1[1], hsv1[2])
time.sleep(1)
self.set_color_hsv(hsv2[0], hsv2[1], hsv2[2])
time.sleep(1)
def set_transition_time(self, value):
"""Set the transition time in ms."""
try:
request = requests.post(
'{}/{}/{}/'.format(self.resource, URI, self._mac),
data={'ramp': value}, timeout=self.timeout)
if request.status_code == 200:
pass
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
def set_off(self):
"""Turn the bulb off."""
try:
request = requests.post(
'{}/{}/{}/'.format(self.resource, URI, self._mac),
data={'action': 'off'}, timeout=self.timeout)
if request.status_code == 200:
pass
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
|
fabaff/python-mystrom | pymystrom/bulb.py | MyStromBulb.set_flashing | python | def set_flashing(self, duration, hsv1, hsv2):
self.set_transition_time(100)
for step in range(0, int(duration/2)):
self.set_color_hsv(hsv1[0], hsv1[1], hsv1[2])
time.sleep(1)
self.set_color_hsv(hsv2[0], hsv2[1], hsv2[2])
time.sleep(1) | Turn the bulb on, flashing with two colors. | train | https://github.com/fabaff/python-mystrom/blob/86410f8952104651ef76ad37c84c29740c50551e/pymystrom/bulb.py#L176-L183 | [
"def set_color_hsv(self, hue, saturation, value):\n \"\"\"Turn the bulb on with the given values as HSV.\"\"\"\n try:\n data = \"action=on&color={};{};{}\".format(hue, saturation, value)\n request = requests.post(\n '{}/{}/{}'.format(self.resource, URI, self._mac),\n data=d... | class MyStromBulb(object):
"""A class for a myStrom bulb."""
def __init__(self, host, mac):
"""Initialize the bulb."""
self.resource = 'http://{}'.format(host)
self._mac = mac
self.timeout = 5
self.data = None
self.state = None
self.consumption = 0
self.brightness = 0
self.color = None
self.firmware = None
self.mode = None
self.transition_time = 0
def get_status(self):
"""Get the details from the bulb."""
try:
request = requests.get(
'{}/{}/'.format(self.resource, URI), timeout=self.timeout)
raw_data = request.json()
# Doesn't always work !!!!!
#self._mac = next(iter(self.raw_data))
self.data = raw_data[self._mac]
return self.data
except (requests.exceptions.ConnectionError, ValueError):
raise exceptions.MyStromConnectionError()
def get_bulb_state(self):
"""Get the relay state."""
self.get_status()
try:
self.state = self.data['on']
except TypeError:
self.state = False
return bool(self.state)
def get_power(self):
"""Get current power."""
self.get_status()
try:
self.consumption = self.data['power']
except TypeError:
self.consumption = 0
return self.consumption
def get_firmware(self):
"""Get the current firmware version."""
self.get_status()
try:
self.firmware = self.data['fw_version']
except TypeError:
self.firmware = 'Unknown'
return self.firmware
def get_brightness(self):
"""Get current brightness."""
self.get_status()
try:
self.brightness = self.data['color'].split(';')[-1]
except TypeError:
self.brightness = 0
return self.brightness
def get_transition_time(self):
"""Get the transition time in ms."""
self.get_status()
try:
self.transition_time = self.data['ramp']
except TypeError:
self.transition_time = 0
return self.transition_time
def get_color(self):
"""Get current color."""
self.get_status()
try:
self.color = self.data['color']
self.mode = self.data['mode']
except TypeError:
self.color = 0
self.mode = ''
return {'color': self.color, 'mode': self.mode}
def set_on(self):
"""Turn the bulb on with the previous settings."""
try:
request = requests.post(
'{}/{}/{}/'.format(self.resource, URI, self._mac),
data={'action': 'on'}, timeout=self.timeout)
if request.status_code == 200:
pass
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
def set_color_hex(self, value):
"""Turn the bulb on with the given color as HEX.
white: FF000000
red: 00FF0000
green: 0000FF00
blue: 000000FF
"""
data = {
'action': 'on',
'color': value,
}
try:
request = requests.post(
'{}/{}/{}/'.format(self.resource, URI, self._mac),
json=data, timeout=self.timeout)
if request.status_code == 200:
pass
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
def set_color_hsv(self, hue, saturation, value):
"""Turn the bulb on with the given values as HSV."""
try:
data = "action=on&color={};{};{}".format(hue, saturation, value)
request = requests.post(
'{}/{}/{}'.format(self.resource, URI, self._mac),
data=data, timeout=self.timeout)
if request.status_code == 200:
self.data['on'] = True
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
def set_white(self):
"""Turn the bulb on, full white."""
self.set_color_hsv(0, 0, 100)
def set_rainbow(self, duration):
"""Turn the bulb on and create a rainbow."""
for i in range(0, 359):
self.set_color_hsv(i, 100, 100)
time.sleep(duration/359)
def set_sunrise(self, duration):
"""Turn the bulb on and create a sunrise."""
self.set_transition_time(duration/100)
for i in range(0, duration):
try:
data = "action=on&color=3;{}".format(i)
request = requests.post(
'{}/{}/{}'.format(self.resource, URI, self._mac),
data=data, timeout=self.timeout)
if request.status_code == 200:
self.data['on'] = True
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
time.sleep(duration/100)
def set_transition_time(self, value):
"""Set the transition time in ms."""
try:
request = requests.post(
'{}/{}/{}/'.format(self.resource, URI, self._mac),
data={'ramp': value}, timeout=self.timeout)
if request.status_code == 200:
pass
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
def set_off(self):
"""Turn the bulb off."""
try:
request = requests.post(
'{}/{}/{}/'.format(self.resource, URI, self._mac),
data={'action': 'off'}, timeout=self.timeout)
if request.status_code == 200:
pass
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
|
fabaff/python-mystrom | pymystrom/bulb.py | MyStromBulb.set_off | python | def set_off(self):
try:
request = requests.post(
'{}/{}/{}/'.format(self.resource, URI, self._mac),
data={'action': 'off'}, timeout=self.timeout)
if request.status_code == 200:
pass
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError() | Turn the bulb off. | train | https://github.com/fabaff/python-mystrom/blob/86410f8952104651ef76ad37c84c29740c50551e/pymystrom/bulb.py#L196-L205 | null | class MyStromBulb(object):
"""A class for a myStrom bulb."""
def __init__(self, host, mac):
"""Initialize the bulb."""
self.resource = 'http://{}'.format(host)
self._mac = mac
self.timeout = 5
self.data = None
self.state = None
self.consumption = 0
self.brightness = 0
self.color = None
self.firmware = None
self.mode = None
self.transition_time = 0
def get_status(self):
"""Get the details from the bulb."""
try:
request = requests.get(
'{}/{}/'.format(self.resource, URI), timeout=self.timeout)
raw_data = request.json()
# Doesn't always work !!!!!
#self._mac = next(iter(self.raw_data))
self.data = raw_data[self._mac]
return self.data
except (requests.exceptions.ConnectionError, ValueError):
raise exceptions.MyStromConnectionError()
def get_bulb_state(self):
"""Get the relay state."""
self.get_status()
try:
self.state = self.data['on']
except TypeError:
self.state = False
return bool(self.state)
def get_power(self):
"""Get current power."""
self.get_status()
try:
self.consumption = self.data['power']
except TypeError:
self.consumption = 0
return self.consumption
def get_firmware(self):
"""Get the current firmware version."""
self.get_status()
try:
self.firmware = self.data['fw_version']
except TypeError:
self.firmware = 'Unknown'
return self.firmware
def get_brightness(self):
"""Get current brightness."""
self.get_status()
try:
self.brightness = self.data['color'].split(';')[-1]
except TypeError:
self.brightness = 0
return self.brightness
def get_transition_time(self):
"""Get the transition time in ms."""
self.get_status()
try:
self.transition_time = self.data['ramp']
except TypeError:
self.transition_time = 0
return self.transition_time
def get_color(self):
"""Get current color."""
self.get_status()
try:
self.color = self.data['color']
self.mode = self.data['mode']
except TypeError:
self.color = 0
self.mode = ''
return {'color': self.color, 'mode': self.mode}
def set_on(self):
"""Turn the bulb on with the previous settings."""
try:
request = requests.post(
'{}/{}/{}/'.format(self.resource, URI, self._mac),
data={'action': 'on'}, timeout=self.timeout)
if request.status_code == 200:
pass
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
def set_color_hex(self, value):
"""Turn the bulb on with the given color as HEX.
white: FF000000
red: 00FF0000
green: 0000FF00
blue: 000000FF
"""
data = {
'action': 'on',
'color': value,
}
try:
request = requests.post(
'{}/{}/{}/'.format(self.resource, URI, self._mac),
json=data, timeout=self.timeout)
if request.status_code == 200:
pass
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
def set_color_hsv(self, hue, saturation, value):
"""Turn the bulb on with the given values as HSV."""
try:
data = "action=on&color={};{};{}".format(hue, saturation, value)
request = requests.post(
'{}/{}/{}'.format(self.resource, URI, self._mac),
data=data, timeout=self.timeout)
if request.status_code == 200:
self.data['on'] = True
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
def set_white(self):
"""Turn the bulb on, full white."""
self.set_color_hsv(0, 0, 100)
def set_rainbow(self, duration):
"""Turn the bulb on and create a rainbow."""
for i in range(0, 359):
self.set_color_hsv(i, 100, 100)
time.sleep(duration/359)
def set_sunrise(self, duration):
"""Turn the bulb on and create a sunrise."""
self.set_transition_time(duration/100)
for i in range(0, duration):
try:
data = "action=on&color=3;{}".format(i)
request = requests.post(
'{}/{}/{}'.format(self.resource, URI, self._mac),
data=data, timeout=self.timeout)
if request.status_code == 200:
self.data['on'] = True
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
time.sleep(duration/100)
def set_flashing(self, duration, hsv1, hsv2):
"""Turn the bulb on, flashing with two colors."""
self.set_transition_time(100)
for step in range(0, int(duration/2)):
self.set_color_hsv(hsv1[0], hsv1[1], hsv1[2])
time.sleep(1)
self.set_color_hsv(hsv2[0], hsv2[1], hsv2[2])
time.sleep(1)
def set_transition_time(self, value):
"""Set the transition time in ms."""
try:
request = requests.post(
'{}/{}/{}/'.format(self.resource, URI, self._mac),
data={'ramp': value}, timeout=self.timeout)
if request.status_code == 200:
pass
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
|
fabaff/python-mystrom | pymystrom/cli.py | read_config | python | def read_config(ip, mac):
click.echo("Read configuration from %s" % ip)
request = requests.get(
'http://{}/{}/{}/'.format(ip, URI, mac), timeout=TIMEOUT)
print(request.json()) | Read the current configuration of a myStrom device. | train | https://github.com/fabaff/python-mystrom/blob/86410f8952104651ef76ad37c84c29740c50551e/pymystrom/cli.py#L35-L40 | null | """
Copyright (c) 2017-2018 Fabian Affolter <fabian@affolter-engineering.ch>
Licensed under MIT. All rights reserved.
"""
import requests
import click
from pymystrom.bulb import MyStromBulb
URI = 'api/v1/device'
TIMEOUT = 5
@click.group()
@click.version_option()
def main():
"""Simple command-line tool to get and set the values of a myStrom devices.
This tool can set the targets of a myStrom button for the different
available actions single, double, long and touch.
"""
@main.group('config')
def config():
"""Get and set the configuration of a myStrom device."""
@config.command('read')
@click.option('--ip', prompt="IP address of the device",
help="IP address of the device.")
@click.option('--mac', prompt="MAC address of the device",
help="MAC address of the device.")
@main.group('button')
def button():
"""Get and set details of a myStrom button."""
@button.command('generic')
@click.option('--ip', prompt="IP address of the button",
help="IP address of the button.")
@click.option('--mac', prompt="MAC address of the button",
help="MAC address of the button.")
@click.option('--single', prompt="URL for a single tap", default="",
help="URL for a single tap.")
@click.option('--double', prompt="URL for a double tap", default="",
help="URL for a double tap.")
@click.option('--long', prompt="URL for a long tab", default="",
help="URL for a long tab.")
@click.option('--touch', prompt="URL for a touch", default="",
help="URL for a touch.")
def write_config(ip, mac, single, double, long, touch):
"""Write the current configuration of a myStrom button."""
click.echo("Write configuration to device %s" % ip)
data = {
'single': single,
'double': double,
'long': long,
'touch': touch,
}
request = requests.post(
'http://{}/{}/{}/'.format(ip, URI, mac), data=data, timeout=TIMEOUT)
if request.status_code == 200:
click.echo("Configuration of %s set" % mac)
@button.command('home-assistant')
@click.option('--ip', prompt="IP address of the button",
help="IP address of the button.")
@click.option('--mac', prompt="MAC address of the button",
help="MAC address of the button.")
@click.option('--hass', prompt="IP address of the Home Assistant instance",
help="IP address of Home Assistant instance to use.")
@click.option('--port', prompt="Port of Home Assistant instance",
default="8123",
help="Port where Home Assistant instance is listening.")
@click.option('--id', prompt="ID of the button", default="",
help="ID of the myStrom button.")
def write_ha_config(ip, mac, hass, port, id):
"""Write the configuration for Home Assistant to a myStrom button."""
click.echo("Write configuration for Home Assistant to device %s..." % ip)
action = "get://{1}:{2}/api/mystrom?{0}={3}"
data = {
'single': action.format('single', hass, port, id),
'double': action.format('double', hass, port, id),
'long': action.format('long', hass, port, id),
'touch': action.format('touch', hass, port, id),
}
request = requests.post(
'http://{}/{}/{}/'.format(ip, URI, mac), data=data, timeout=TIMEOUT)
if request.status_code == 200:
click.echo("Configuration for %s set" % ip)
click.echo("After using the push pattern the first time then "
"the myStrom WiFi Button will show up as %s" % id)
@button.command('reset')
@click.option('--ip', prompt="IP address of the WiFi Button",
help="P address of the WiFi Button.")
@click.option('--mac', prompt="MAC address of the button",
help="MAC address of the Wifi Button.")
def reset_config(ip, mac):
"""Reset the current configuration of a myStrom WiFi Button."""
click.echo("Reset configuration of button %s..." % ip)
data = {
'single': "",
'double': "",
'long': "",
'touch': "",
}
request = requests.post(
'http://{}/{}/{}/'.format(ip, URI, mac), data=data, timeout=TIMEOUT)
if request.status_code == 200:
click.echo("Reset configuration of %s" % mac)
@main.group('bulb')
def bulb():
"""Get and set details of a myStrom bulb."""
@bulb.command('on')
@click.option('--ip', prompt="IP address of the bulb",
help="IP address of the bulb.")
@click.option('--mac', prompt="MAC address of the bulb",
help="MAC address of the bulb.")
def on(ip, mac):
"""Switch the bulb on."""
bulb = MyStromBulb(ip, mac)
bulb.set_color_hex('000000FF')
@bulb.command('color')
@click.option('--ip', prompt="IP address of the bulb",
help="IP address of the bulb.")
@click.option('--mac', prompt="MAC address of the bulb",
help="MAC address of the bulb.")
@click.option('--hue', prompt="Set the hue of the bulb",
help="Set the hue of the bulb.")
@click.option('--saturation', prompt="Set the saturation of the bulb",
help="Set the saturation of the bulb.")
@click.option('--value', prompt="Set the value of the bulb",
help="Set the value of the bulb.")
def color(ip, mac, hue, saturation, value):
"""Switch the bulb on with the given color."""
bulb = MyStromBulb(ip, mac)
bulb.set_color_hsv(hue, saturation, value)
@bulb.command('off')
@click.option('--ip', prompt="IP address of the bulb",
help="IP address of the bulb.")
@click.option('--mac', prompt="MAC address of the bulb",
help="MAC address of the bulb.")
def off(ip, mac):
"""Switch the bulb off."""
bulb = MyStromBulb(ip, mac)
bulb.set_off()
if __name__ == '__main__':
main()
|
fabaff/python-mystrom | pymystrom/cli.py | write_config | python | def write_config(ip, mac, single, double, long, touch):
click.echo("Write configuration to device %s" % ip)
data = {
'single': single,
'double': double,
'long': long,
'touch': touch,
}
request = requests.post(
'http://{}/{}/{}/'.format(ip, URI, mac), data=data, timeout=TIMEOUT)
if request.status_code == 200:
click.echo("Configuration of %s set" % mac) | Write the current configuration of a myStrom button. | train | https://github.com/fabaff/python-mystrom/blob/86410f8952104651ef76ad37c84c29740c50551e/pymystrom/cli.py#L61-L74 | null | """
Copyright (c) 2017-2018 Fabian Affolter <fabian@affolter-engineering.ch>
Licensed under MIT. All rights reserved.
"""
import requests
import click
from pymystrom.bulb import MyStromBulb
URI = 'api/v1/device'
TIMEOUT = 5
@click.group()
@click.version_option()
def main():
"""Simple command-line tool to get and set the values of a myStrom devices.
This tool can set the targets of a myStrom button for the different
available actions single, double, long and touch.
"""
@main.group('config')
def config():
"""Get and set the configuration of a myStrom device."""
@config.command('read')
@click.option('--ip', prompt="IP address of the device",
help="IP address of the device.")
@click.option('--mac', prompt="MAC address of the device",
help="MAC address of the device.")
def read_config(ip, mac):
"""Read the current configuration of a myStrom device."""
click.echo("Read configuration from %s" % ip)
request = requests.get(
'http://{}/{}/{}/'.format(ip, URI, mac), timeout=TIMEOUT)
print(request.json())
@main.group('button')
def button():
"""Get and set details of a myStrom button."""
@button.command('generic')
@click.option('--ip', prompt="IP address of the button",
help="IP address of the button.")
@click.option('--mac', prompt="MAC address of the button",
help="MAC address of the button.")
@click.option('--single', prompt="URL for a single tap", default="",
help="URL for a single tap.")
@click.option('--double', prompt="URL for a double tap", default="",
help="URL for a double tap.")
@click.option('--long', prompt="URL for a long tab", default="",
help="URL for a long tab.")
@click.option('--touch', prompt="URL for a touch", default="",
help="URL for a touch.")
@button.command('home-assistant')
@click.option('--ip', prompt="IP address of the button",
help="IP address of the button.")
@click.option('--mac', prompt="MAC address of the button",
help="MAC address of the button.")
@click.option('--hass', prompt="IP address of the Home Assistant instance",
help="IP address of Home Assistant instance to use.")
@click.option('--port', prompt="Port of Home Assistant instance",
default="8123",
help="Port where Home Assistant instance is listening.")
@click.option('--id', prompt="ID of the button", default="",
help="ID of the myStrom button.")
def write_ha_config(ip, mac, hass, port, id):
"""Write the configuration for Home Assistant to a myStrom button."""
click.echo("Write configuration for Home Assistant to device %s..." % ip)
action = "get://{1}:{2}/api/mystrom?{0}={3}"
data = {
'single': action.format('single', hass, port, id),
'double': action.format('double', hass, port, id),
'long': action.format('long', hass, port, id),
'touch': action.format('touch', hass, port, id),
}
request = requests.post(
'http://{}/{}/{}/'.format(ip, URI, mac), data=data, timeout=TIMEOUT)
if request.status_code == 200:
click.echo("Configuration for %s set" % ip)
click.echo("After using the push pattern the first time then "
"the myStrom WiFi Button will show up as %s" % id)
@button.command('reset')
@click.option('--ip', prompt="IP address of the WiFi Button",
help="P address of the WiFi Button.")
@click.option('--mac', prompt="MAC address of the button",
help="MAC address of the Wifi Button.")
def reset_config(ip, mac):
"""Reset the current configuration of a myStrom WiFi Button."""
click.echo("Reset configuration of button %s..." % ip)
data = {
'single': "",
'double': "",
'long': "",
'touch': "",
}
request = requests.post(
'http://{}/{}/{}/'.format(ip, URI, mac), data=data, timeout=TIMEOUT)
if request.status_code == 200:
click.echo("Reset configuration of %s" % mac)
@main.group('bulb')
def bulb():
"""Get and set details of a myStrom bulb."""
@bulb.command('on')
@click.option('--ip', prompt="IP address of the bulb",
help="IP address of the bulb.")
@click.option('--mac', prompt="MAC address of the bulb",
help="MAC address of the bulb.")
def on(ip, mac):
"""Switch the bulb on."""
bulb = MyStromBulb(ip, mac)
bulb.set_color_hex('000000FF')
@bulb.command('color')
@click.option('--ip', prompt="IP address of the bulb",
help="IP address of the bulb.")
@click.option('--mac', prompt="MAC address of the bulb",
help="MAC address of the bulb.")
@click.option('--hue', prompt="Set the hue of the bulb",
help="Set the hue of the bulb.")
@click.option('--saturation', prompt="Set the saturation of the bulb",
help="Set the saturation of the bulb.")
@click.option('--value', prompt="Set the value of the bulb",
help="Set the value of the bulb.")
def color(ip, mac, hue, saturation, value):
"""Switch the bulb on with the given color."""
bulb = MyStromBulb(ip, mac)
bulb.set_color_hsv(hue, saturation, value)
@bulb.command('off')
@click.option('--ip', prompt="IP address of the bulb",
help="IP address of the bulb.")
@click.option('--mac', prompt="MAC address of the bulb",
help="MAC address of the bulb.")
def off(ip, mac):
"""Switch the bulb off."""
bulb = MyStromBulb(ip, mac)
bulb.set_off()
if __name__ == '__main__':
main()
|
fabaff/python-mystrom | pymystrom/cli.py | write_ha_config | python | def write_ha_config(ip, mac, hass, port, id):
click.echo("Write configuration for Home Assistant to device %s..." % ip)
action = "get://{1}:{2}/api/mystrom?{0}={3}"
data = {
'single': action.format('single', hass, port, id),
'double': action.format('double', hass, port, id),
'long': action.format('long', hass, port, id),
'touch': action.format('touch', hass, port, id),
}
request = requests.post(
'http://{}/{}/{}/'.format(ip, URI, mac), data=data, timeout=TIMEOUT)
if request.status_code == 200:
click.echo("Configuration for %s set" % ip)
click.echo("After using the push pattern the first time then "
"the myStrom WiFi Button will show up as %s" % id) | Write the configuration for Home Assistant to a myStrom button. | train | https://github.com/fabaff/python-mystrom/blob/86410f8952104651ef76ad37c84c29740c50551e/pymystrom/cli.py#L89-L106 | null | """
Copyright (c) 2017-2018 Fabian Affolter <fabian@affolter-engineering.ch>
Licensed under MIT. All rights reserved.
"""
import requests
import click
from pymystrom.bulb import MyStromBulb
URI = 'api/v1/device'
TIMEOUT = 5
@click.group()
@click.version_option()
def main():
"""Simple command-line tool to get and set the values of a myStrom devices.
This tool can set the targets of a myStrom button for the different
available actions single, double, long and touch.
"""
@main.group('config')
def config():
"""Get and set the configuration of a myStrom device."""
@config.command('read')
@click.option('--ip', prompt="IP address of the device",
help="IP address of the device.")
@click.option('--mac', prompt="MAC address of the device",
help="MAC address of the device.")
def read_config(ip, mac):
"""Read the current configuration of a myStrom device."""
click.echo("Read configuration from %s" % ip)
request = requests.get(
'http://{}/{}/{}/'.format(ip, URI, mac), timeout=TIMEOUT)
print(request.json())
@main.group('button')
def button():
"""Get and set details of a myStrom button."""
@button.command('generic')
@click.option('--ip', prompt="IP address of the button",
help="IP address of the button.")
@click.option('--mac', prompt="MAC address of the button",
help="MAC address of the button.")
@click.option('--single', prompt="URL for a single tap", default="",
help="URL for a single tap.")
@click.option('--double', prompt="URL for a double tap", default="",
help="URL for a double tap.")
@click.option('--long', prompt="URL for a long tab", default="",
help="URL for a long tab.")
@click.option('--touch', prompt="URL for a touch", default="",
help="URL for a touch.")
def write_config(ip, mac, single, double, long, touch):
"""Write the current configuration of a myStrom button."""
click.echo("Write configuration to device %s" % ip)
data = {
'single': single,
'double': double,
'long': long,
'touch': touch,
}
request = requests.post(
'http://{}/{}/{}/'.format(ip, URI, mac), data=data, timeout=TIMEOUT)
if request.status_code == 200:
click.echo("Configuration of %s set" % mac)
@button.command('home-assistant')
@click.option('--ip', prompt="IP address of the button",
help="IP address of the button.")
@click.option('--mac', prompt="MAC address of the button",
help="MAC address of the button.")
@click.option('--hass', prompt="IP address of the Home Assistant instance",
help="IP address of Home Assistant instance to use.")
@click.option('--port', prompt="Port of Home Assistant instance",
default="8123",
help="Port where Home Assistant instance is listening.")
@click.option('--id', prompt="ID of the button", default="",
help="ID of the myStrom button.")
@button.command('reset')
@click.option('--ip', prompt="IP address of the WiFi Button",
help="P address of the WiFi Button.")
@click.option('--mac', prompt="MAC address of the button",
help="MAC address of the Wifi Button.")
def reset_config(ip, mac):
"""Reset the current configuration of a myStrom WiFi Button."""
click.echo("Reset configuration of button %s..." % ip)
data = {
'single': "",
'double': "",
'long': "",
'touch': "",
}
request = requests.post(
'http://{}/{}/{}/'.format(ip, URI, mac), data=data, timeout=TIMEOUT)
if request.status_code == 200:
click.echo("Reset configuration of %s" % mac)
@main.group('bulb')
def bulb():
"""Get and set details of a myStrom bulb."""
@bulb.command('on')
@click.option('--ip', prompt="IP address of the bulb",
help="IP address of the bulb.")
@click.option('--mac', prompt="MAC address of the bulb",
help="MAC address of the bulb.")
def on(ip, mac):
"""Switch the bulb on."""
bulb = MyStromBulb(ip, mac)
bulb.set_color_hex('000000FF')
@bulb.command('color')
@click.option('--ip', prompt="IP address of the bulb",
help="IP address of the bulb.")
@click.option('--mac', prompt="MAC address of the bulb",
help="MAC address of the bulb.")
@click.option('--hue', prompt="Set the hue of the bulb",
help="Set the hue of the bulb.")
@click.option('--saturation', prompt="Set the saturation of the bulb",
help="Set the saturation of the bulb.")
@click.option('--value', prompt="Set the value of the bulb",
help="Set the value of the bulb.")
def color(ip, mac, hue, saturation, value):
"""Switch the bulb on with the given color."""
bulb = MyStromBulb(ip, mac)
bulb.set_color_hsv(hue, saturation, value)
@bulb.command('off')
@click.option('--ip', prompt="IP address of the bulb",
help="IP address of the bulb.")
@click.option('--mac', prompt="MAC address of the bulb",
help="MAC address of the bulb.")
def off(ip, mac):
"""Switch the bulb off."""
bulb = MyStromBulb(ip, mac)
bulb.set_off()
if __name__ == '__main__':
main()
|
fabaff/python-mystrom | pymystrom/cli.py | reset_config | python | def reset_config(ip, mac):
click.echo("Reset configuration of button %s..." % ip)
data = {
'single': "",
'double': "",
'long': "",
'touch': "",
}
request = requests.post(
'http://{}/{}/{}/'.format(ip, URI, mac), data=data, timeout=TIMEOUT)
if request.status_code == 200:
click.echo("Reset configuration of %s" % mac) | Reset the current configuration of a myStrom WiFi Button. | train | https://github.com/fabaff/python-mystrom/blob/86410f8952104651ef76ad37c84c29740c50551e/pymystrom/cli.py#L114-L127 | null | """
Copyright (c) 2017-2018 Fabian Affolter <fabian@affolter-engineering.ch>
Licensed under MIT. All rights reserved.
"""
import requests
import click
from pymystrom.bulb import MyStromBulb
URI = 'api/v1/device'
TIMEOUT = 5
@click.group()
@click.version_option()
def main():
"""Simple command-line tool to get and set the values of a myStrom devices.
This tool can set the targets of a myStrom button for the different
available actions single, double, long and touch.
"""
@main.group('config')
def config():
"""Get and set the configuration of a myStrom device."""
@config.command('read')
@click.option('--ip', prompt="IP address of the device",
help="IP address of the device.")
@click.option('--mac', prompt="MAC address of the device",
help="MAC address of the device.")
def read_config(ip, mac):
"""Read the current configuration of a myStrom device."""
click.echo("Read configuration from %s" % ip)
request = requests.get(
'http://{}/{}/{}/'.format(ip, URI, mac), timeout=TIMEOUT)
print(request.json())
@main.group('button')
def button():
"""Get and set details of a myStrom button."""
@button.command('generic')
@click.option('--ip', prompt="IP address of the button",
help="IP address of the button.")
@click.option('--mac', prompt="MAC address of the button",
help="MAC address of the button.")
@click.option('--single', prompt="URL for a single tap", default="",
help="URL for a single tap.")
@click.option('--double', prompt="URL for a double tap", default="",
help="URL for a double tap.")
@click.option('--long', prompt="URL for a long tab", default="",
help="URL for a long tab.")
@click.option('--touch', prompt="URL for a touch", default="",
help="URL for a touch.")
def write_config(ip, mac, single, double, long, touch):
"""Write the current configuration of a myStrom button."""
click.echo("Write configuration to device %s" % ip)
data = {
'single': single,
'double': double,
'long': long,
'touch': touch,
}
request = requests.post(
'http://{}/{}/{}/'.format(ip, URI, mac), data=data, timeout=TIMEOUT)
if request.status_code == 200:
click.echo("Configuration of %s set" % mac)
@button.command('home-assistant')
@click.option('--ip', prompt="IP address of the button",
help="IP address of the button.")
@click.option('--mac', prompt="MAC address of the button",
help="MAC address of the button.")
@click.option('--hass', prompt="IP address of the Home Assistant instance",
help="IP address of Home Assistant instance to use.")
@click.option('--port', prompt="Port of Home Assistant instance",
default="8123",
help="Port where Home Assistant instance is listening.")
@click.option('--id', prompt="ID of the button", default="",
help="ID of the myStrom button.")
def write_ha_config(ip, mac, hass, port, id):
"""Write the configuration for Home Assistant to a myStrom button."""
click.echo("Write configuration for Home Assistant to device %s..." % ip)
action = "get://{1}:{2}/api/mystrom?{0}={3}"
data = {
'single': action.format('single', hass, port, id),
'double': action.format('double', hass, port, id),
'long': action.format('long', hass, port, id),
'touch': action.format('touch', hass, port, id),
}
request = requests.post(
'http://{}/{}/{}/'.format(ip, URI, mac), data=data, timeout=TIMEOUT)
if request.status_code == 200:
click.echo("Configuration for %s set" % ip)
click.echo("After using the push pattern the first time then "
"the myStrom WiFi Button will show up as %s" % id)
@button.command('reset')
@click.option('--ip', prompt="IP address of the WiFi Button",
help="P address of the WiFi Button.")
@click.option('--mac', prompt="MAC address of the button",
help="MAC address of the Wifi Button.")
@main.group('bulb')
def bulb():
"""Get and set details of a myStrom bulb."""
@bulb.command('on')
@click.option('--ip', prompt="IP address of the bulb",
help="IP address of the bulb.")
@click.option('--mac', prompt="MAC address of the bulb",
help="MAC address of the bulb.")
def on(ip, mac):
"""Switch the bulb on."""
bulb = MyStromBulb(ip, mac)
bulb.set_color_hex('000000FF')
@bulb.command('color')
@click.option('--ip', prompt="IP address of the bulb",
help="IP address of the bulb.")
@click.option('--mac', prompt="MAC address of the bulb",
help="MAC address of the bulb.")
@click.option('--hue', prompt="Set the hue of the bulb",
help="Set the hue of the bulb.")
@click.option('--saturation', prompt="Set the saturation of the bulb",
help="Set the saturation of the bulb.")
@click.option('--value', prompt="Set the value of the bulb",
help="Set the value of the bulb.")
def color(ip, mac, hue, saturation, value):
"""Switch the bulb on with the given color."""
bulb = MyStromBulb(ip, mac)
bulb.set_color_hsv(hue, saturation, value)
@bulb.command('off')
@click.option('--ip', prompt="IP address of the bulb",
help="IP address of the bulb.")
@click.option('--mac', prompt="MAC address of the bulb",
help="MAC address of the bulb.")
def off(ip, mac):
"""Switch the bulb off."""
bulb = MyStromBulb(ip, mac)
bulb.set_off()
if __name__ == '__main__':
main()
|
fabaff/python-mystrom | pymystrom/cli.py | color | python | def color(ip, mac, hue, saturation, value):
bulb = MyStromBulb(ip, mac)
bulb.set_color_hsv(hue, saturation, value) | Switch the bulb on with the given color. | train | https://github.com/fabaff/python-mystrom/blob/86410f8952104651ef76ad37c84c29740c50551e/pymystrom/cli.py#L157-L160 | [
"def set_color_hsv(self, hue, saturation, value):\n \"\"\"Turn the bulb on with the given values as HSV.\"\"\"\n try:\n data = \"action=on&color={};{};{}\".format(hue, saturation, value)\n request = requests.post(\n '{}/{}/{}'.format(self.resource, URI, self._mac),\n data=d... | """
Copyright (c) 2017-2018 Fabian Affolter <fabian@affolter-engineering.ch>
Licensed under MIT. All rights reserved.
"""
import requests
import click
from pymystrom.bulb import MyStromBulb
URI = 'api/v1/device'
TIMEOUT = 5
@click.group()
@click.version_option()
def main():
"""Simple command-line tool to get and set the values of a myStrom devices.
This tool can set the targets of a myStrom button for the different
available actions single, double, long and touch.
"""
@main.group('config')
def config():
"""Get and set the configuration of a myStrom device."""
@config.command('read')
@click.option('--ip', prompt="IP address of the device",
help="IP address of the device.")
@click.option('--mac', prompt="MAC address of the device",
help="MAC address of the device.")
def read_config(ip, mac):
"""Read the current configuration of a myStrom device."""
click.echo("Read configuration from %s" % ip)
request = requests.get(
'http://{}/{}/{}/'.format(ip, URI, mac), timeout=TIMEOUT)
print(request.json())
@main.group('button')
def button():
"""Get and set details of a myStrom button."""
@button.command('generic')
@click.option('--ip', prompt="IP address of the button",
help="IP address of the button.")
@click.option('--mac', prompt="MAC address of the button",
help="MAC address of the button.")
@click.option('--single', prompt="URL for a single tap", default="",
help="URL for a single tap.")
@click.option('--double', prompt="URL for a double tap", default="",
help="URL for a double tap.")
@click.option('--long', prompt="URL for a long tab", default="",
help="URL for a long tab.")
@click.option('--touch', prompt="URL for a touch", default="",
help="URL for a touch.")
def write_config(ip, mac, single, double, long, touch):
"""Write the current configuration of a myStrom button."""
click.echo("Write configuration to device %s" % ip)
data = {
'single': single,
'double': double,
'long': long,
'touch': touch,
}
request = requests.post(
'http://{}/{}/{}/'.format(ip, URI, mac), data=data, timeout=TIMEOUT)
if request.status_code == 200:
click.echo("Configuration of %s set" % mac)
@button.command('home-assistant')
@click.option('--ip', prompt="IP address of the button",
help="IP address of the button.")
@click.option('--mac', prompt="MAC address of the button",
help="MAC address of the button.")
@click.option('--hass', prompt="IP address of the Home Assistant instance",
help="IP address of Home Assistant instance to use.")
@click.option('--port', prompt="Port of Home Assistant instance",
default="8123",
help="Port where Home Assistant instance is listening.")
@click.option('--id', prompt="ID of the button", default="",
help="ID of the myStrom button.")
def write_ha_config(ip, mac, hass, port, id):
"""Write the configuration for Home Assistant to a myStrom button."""
click.echo("Write configuration for Home Assistant to device %s..." % ip)
action = "get://{1}:{2}/api/mystrom?{0}={3}"
data = {
'single': action.format('single', hass, port, id),
'double': action.format('double', hass, port, id),
'long': action.format('long', hass, port, id),
'touch': action.format('touch', hass, port, id),
}
request = requests.post(
'http://{}/{}/{}/'.format(ip, URI, mac), data=data, timeout=TIMEOUT)
if request.status_code == 200:
click.echo("Configuration for %s set" % ip)
click.echo("After using the push pattern the first time then "
"the myStrom WiFi Button will show up as %s" % id)
@button.command('reset')
@click.option('--ip', prompt="IP address of the WiFi Button",
help="P address of the WiFi Button.")
@click.option('--mac', prompt="MAC address of the button",
help="MAC address of the Wifi Button.")
def reset_config(ip, mac):
"""Reset the current configuration of a myStrom WiFi Button."""
click.echo("Reset configuration of button %s..." % ip)
data = {
'single': "",
'double': "",
'long': "",
'touch': "",
}
request = requests.post(
'http://{}/{}/{}/'.format(ip, URI, mac), data=data, timeout=TIMEOUT)
if request.status_code == 200:
click.echo("Reset configuration of %s" % mac)
@main.group('bulb')
def bulb():
"""Get and set details of a myStrom bulb."""
@bulb.command('on')
@click.option('--ip', prompt="IP address of the bulb",
help="IP address of the bulb.")
@click.option('--mac', prompt="MAC address of the bulb",
help="MAC address of the bulb.")
def on(ip, mac):
"""Switch the bulb on."""
bulb = MyStromBulb(ip, mac)
bulb.set_color_hex('000000FF')
@bulb.command('color')
@click.option('--ip', prompt="IP address of the bulb",
help="IP address of the bulb.")
@click.option('--mac', prompt="MAC address of the bulb",
help="MAC address of the bulb.")
@click.option('--hue', prompt="Set the hue of the bulb",
help="Set the hue of the bulb.")
@click.option('--saturation', prompt="Set the saturation of the bulb",
help="Set the saturation of the bulb.")
@click.option('--value', prompt="Set the value of the bulb",
help="Set the value of the bulb.")
@bulb.command('off')
@click.option('--ip', prompt="IP address of the bulb",
help="IP address of the bulb.")
@click.option('--mac', prompt="MAC address of the bulb",
help="MAC address of the bulb.")
def off(ip, mac):
"""Switch the bulb off."""
bulb = MyStromBulb(ip, mac)
bulb.set_off()
if __name__ == '__main__':
main()
|
workforce-data-initiative/skills-utils | skills_utils/metta.py | quarter_boundaries | python | def quarter_boundaries(quarter):
year, quarter = quarter.split('Q')
year = int(year)
quarter = int(quarter)
first_month_of_quarter = 3 * quarter - 2
last_month_of_quarter = 3 * quarter
first_day = date(year, first_month_of_quarter, 1)
last_day = date(year, last_month_of_quarter, monthrange(year, last_month_of_quarter)[1])
return first_day, last_day | Returns first and last day of a quarter
Args:
quarter (str) quarter, in format '2015Q1'
Returns: (tuple) datetime.dates for the first and last days of the quarter | train | https://github.com/workforce-data-initiative/skills-utils/blob/4cf9b7c2938984f34bbcc33d45482d23c52c7539/skills_utils/metta.py#L9-L24 | null | """Metta-data (https://github.com/dssg/metta-data) utilities"""
from datetime import date
from calendar import monthrange
import metta
import pandas as pd
from random import randint
def metta_config(quarter, num_dimensions):
"""Returns metta metadata for a quarter's SOC code classifier matrix
Args:
quarter (str) quarter, in format '2015Q1'
num_dimensions (int) Number of features in matrix
Returns: (dict) metadata suitable for metta.archive_train_test
"""
first_day, last_day = quarter_boundaries(quarter)
return {
'start_time': first_day,
'end_time': last_day,
'prediction_window': 3, # ???
'label_name': 'onet_soc_code',
'label_type': 'categorical',
'matrix_id': 'job_postings_{}'.format(quarter),
'feature_names': ['doc2vec_{}'.format(i) for i in range(num_dimensions)],
}
def upload_to_metta(train_features_path, train_labels_path, test_features_path, test_labels_path, train_quarter, test_quarter, num_dimensions):
"""Store train and test matrices using metta
Args:
train_features_path (str) Path to matrix with train features
train_labels_path (str) Path to matrix with train labels
test_features_path (str) Path to matrix with test features
test_labels_path (str) Path to matrix with test labels
train_quarter (str) Quarter of train matrix
test_quarter (str) Quarter of test matrix
num_dimensions (int) Number of features
"""
train_config = metta_config(train_quarter, num_dimensions)
test_config = metta_config(test_quarter, num_dimensions)
X_train = pd.read_csv(train_features_path, sep=',')
X_train.columns = ['doc2vec_'+str(i) for i in range(X_train.shape[1])]
#X_train['label'] = pd.Series([randint(0,23) for i in range(len(X_train))])
Y_train = pd.read_csv(train_labels_path)
Y_train.columns = ['onet_soc_code']
train = pd.concat([X_train, Y_train], axis=1)
X_test = pd.read_csv(test_features_path, sep=',')
X_test.columns = ['doc2vec_'+str(i) for i in range(X_test.shape[1])]
#X_test['label'] = pd.Series([randint(0,23) for i in range(len(X_test))])
Y_test = pd.read_csv(test_labels_path)
Y_test.columns = ['onet_soc_code']
test = pd.concat([X_test, Y_test], axis=1)
#print(train.head())
#print(train.shape)
#print(test.head())
#print(test.shape)
metta.archive_train_test(
train_config,
X_train,
test_config,
X_test,
directory='wdi'
)
if __name__ == '__main__':
upload_to_metta('../tmp/job_features_train_2011Q1.csv',
'../tmp/job_labels_train_2011Q1.csv',
'../tmp/job_features_test_2016Q1.csv',
'../tmp/job_labels_test_2016Q1.csv',
'2011Q1',
'2016Q1',
500)
|
workforce-data-initiative/skills-utils | skills_utils/metta.py | metta_config | python | def metta_config(quarter, num_dimensions):
first_day, last_day = quarter_boundaries(quarter)
return {
'start_time': first_day,
'end_time': last_day,
'prediction_window': 3, # ???
'label_name': 'onet_soc_code',
'label_type': 'categorical',
'matrix_id': 'job_postings_{}'.format(quarter),
'feature_names': ['doc2vec_{}'.format(i) for i in range(num_dimensions)],
} | Returns metta metadata for a quarter's SOC code classifier matrix
Args:
quarter (str) quarter, in format '2015Q1'
num_dimensions (int) Number of features in matrix
Returns: (dict) metadata suitable for metta.archive_train_test | train | https://github.com/workforce-data-initiative/skills-utils/blob/4cf9b7c2938984f34bbcc33d45482d23c52c7539/skills_utils/metta.py#L27-L45 | [
"def quarter_boundaries(quarter):\n \"\"\"Returns first and last day of a quarter\n\n Args:\n quarter (str) quarter, in format '2015Q1'\n\n Returns: (tuple) datetime.dates for the first and last days of the quarter\n \"\"\"\n year, quarter = quarter.split('Q')\n year = int(year)\n quarte... | """Metta-data (https://github.com/dssg/metta-data) utilities"""
from datetime import date
from calendar import monthrange
import metta
import pandas as pd
from random import randint
def quarter_boundaries(quarter):
"""Returns first and last day of a quarter
Args:
quarter (str) quarter, in format '2015Q1'
Returns: (tuple) datetime.dates for the first and last days of the quarter
"""
year, quarter = quarter.split('Q')
year = int(year)
quarter = int(quarter)
first_month_of_quarter = 3 * quarter - 2
last_month_of_quarter = 3 * quarter
first_day = date(year, first_month_of_quarter, 1)
last_day = date(year, last_month_of_quarter, monthrange(year, last_month_of_quarter)[1])
return first_day, last_day
def upload_to_metta(train_features_path, train_labels_path, test_features_path, test_labels_path, train_quarter, test_quarter, num_dimensions):
"""Store train and test matrices using metta
Args:
train_features_path (str) Path to matrix with train features
train_labels_path (str) Path to matrix with train labels
test_features_path (str) Path to matrix with test features
test_labels_path (str) Path to matrix with test labels
train_quarter (str) Quarter of train matrix
test_quarter (str) Quarter of test matrix
num_dimensions (int) Number of features
"""
train_config = metta_config(train_quarter, num_dimensions)
test_config = metta_config(test_quarter, num_dimensions)
X_train = pd.read_csv(train_features_path, sep=',')
X_train.columns = ['doc2vec_'+str(i) for i in range(X_train.shape[1])]
#X_train['label'] = pd.Series([randint(0,23) for i in range(len(X_train))])
Y_train = pd.read_csv(train_labels_path)
Y_train.columns = ['onet_soc_code']
train = pd.concat([X_train, Y_train], axis=1)
X_test = pd.read_csv(test_features_path, sep=',')
X_test.columns = ['doc2vec_'+str(i) for i in range(X_test.shape[1])]
#X_test['label'] = pd.Series([randint(0,23) for i in range(len(X_test))])
Y_test = pd.read_csv(test_labels_path)
Y_test.columns = ['onet_soc_code']
test = pd.concat([X_test, Y_test], axis=1)
#print(train.head())
#print(train.shape)
#print(test.head())
#print(test.shape)
metta.archive_train_test(
train_config,
X_train,
test_config,
X_test,
directory='wdi'
)
if __name__ == '__main__':
upload_to_metta('../tmp/job_features_train_2011Q1.csv',
'../tmp/job_labels_train_2011Q1.csv',
'../tmp/job_features_test_2016Q1.csv',
'../tmp/job_labels_test_2016Q1.csv',
'2011Q1',
'2016Q1',
500)
|
workforce-data-initiative/skills-utils | skills_utils/metta.py | upload_to_metta | python | def upload_to_metta(train_features_path, train_labels_path, test_features_path, test_labels_path, train_quarter, test_quarter, num_dimensions):
train_config = metta_config(train_quarter, num_dimensions)
test_config = metta_config(test_quarter, num_dimensions)
X_train = pd.read_csv(train_features_path, sep=',')
X_train.columns = ['doc2vec_'+str(i) for i in range(X_train.shape[1])]
#X_train['label'] = pd.Series([randint(0,23) for i in range(len(X_train))])
Y_train = pd.read_csv(train_labels_path)
Y_train.columns = ['onet_soc_code']
train = pd.concat([X_train, Y_train], axis=1)
X_test = pd.read_csv(test_features_path, sep=',')
X_test.columns = ['doc2vec_'+str(i) for i in range(X_test.shape[1])]
#X_test['label'] = pd.Series([randint(0,23) for i in range(len(X_test))])
Y_test = pd.read_csv(test_labels_path)
Y_test.columns = ['onet_soc_code']
test = pd.concat([X_test, Y_test], axis=1)
#print(train.head())
#print(train.shape)
#print(test.head())
#print(test.shape)
metta.archive_train_test(
train_config,
X_train,
test_config,
X_test,
directory='wdi'
) | Store train and test matrices using metta
Args:
train_features_path (str) Path to matrix with train features
train_labels_path (str) Path to matrix with train labels
test_features_path (str) Path to matrix with test features
test_labels_path (str) Path to matrix with test labels
train_quarter (str) Quarter of train matrix
test_quarter (str) Quarter of test matrix
num_dimensions (int) Number of features | train | https://github.com/workforce-data-initiative/skills-utils/blob/4cf9b7c2938984f34bbcc33d45482d23c52c7539/skills_utils/metta.py#L47-L85 | [
"def metta_config(quarter, num_dimensions):\n \"\"\"Returns metta metadata for a quarter's SOC code classifier matrix\n\n Args:\n quarter (str) quarter, in format '2015Q1'\n num_dimensions (int) Number of features in matrix\n\n Returns: (dict) metadata suitable for metta.archive_train_test\n ... | """Metta-data (https://github.com/dssg/metta-data) utilities"""
from datetime import date
from calendar import monthrange
import metta
import pandas as pd
from random import randint
def quarter_boundaries(quarter):
"""Returns first and last day of a quarter
Args:
quarter (str) quarter, in format '2015Q1'
Returns: (tuple) datetime.dates for the first and last days of the quarter
"""
year, quarter = quarter.split('Q')
year = int(year)
quarter = int(quarter)
first_month_of_quarter = 3 * quarter - 2
last_month_of_quarter = 3 * quarter
first_day = date(year, first_month_of_quarter, 1)
last_day = date(year, last_month_of_quarter, monthrange(year, last_month_of_quarter)[1])
return first_day, last_day
def metta_config(quarter, num_dimensions):
"""Returns metta metadata for a quarter's SOC code classifier matrix
Args:
quarter (str) quarter, in format '2015Q1'
num_dimensions (int) Number of features in matrix
Returns: (dict) metadata suitable for metta.archive_train_test
"""
first_day, last_day = quarter_boundaries(quarter)
return {
'start_time': first_day,
'end_time': last_day,
'prediction_window': 3, # ???
'label_name': 'onet_soc_code',
'label_type': 'categorical',
'matrix_id': 'job_postings_{}'.format(quarter),
'feature_names': ['doc2vec_{}'.format(i) for i in range(num_dimensions)],
}
if __name__ == '__main__':
upload_to_metta('../tmp/job_features_train_2011Q1.csv',
'../tmp/job_labels_train_2011Q1.csv',
'../tmp/job_features_test_2016Q1.csv',
'../tmp/job_labels_test_2016Q1.csv',
'2011Q1',
'2016Q1',
500)
|
workforce-data-initiative/skills-utils | skills_utils/s3.py | upload | python | def upload(s3_conn, filepath, s3_path):
bucket_name, prefix = split_s3_path(s3_path)
bucket = s3_conn.get_bucket(bucket_name)
filename = os.path.basename(filepath)
key = boto.s3.key.Key(
bucket=bucket,
name='{}/{}'.format(prefix, filename)
)
logging.info('uploading from %s to %s', filepath, key)
key.set_contents_from_filename(filepath) | Uploads the given file to s3
Args:
s3_conn: (boto.s3.connection) an s3 connection
filepath (str) the local filename
s3_path (str) the destination path on s3 | train | https://github.com/workforce-data-initiative/skills-utils/blob/4cf9b7c2938984f34bbcc33d45482d23c52c7539/skills_utils/s3.py#L24-L41 | [
"def split_s3_path(path):\n \"\"\"\n Args:\n path: (str) an s3 path including a bucket\n (bucket_name/prefix/prefix2)\n Returns:\n A tuple containing the bucket name and full prefix)\n \"\"\"\n return path.split('/', 1)\n"
] | """
Common S3 utilities
"""
import boto
import json
import logging
import os
from collections.abc import MutableMapping
import s3fs
def split_s3_path(path):
"""
Args:
path: (str) an s3 path including a bucket
(bucket_name/prefix/prefix2)
Returns:
A tuple containing the bucket name and full prefix)
"""
return path.split('/', 1)
def upload_dict(s3_conn, s3_prefix, data_to_sync):
"""Syncs a dictionary to an S3 bucket, serializing each value in the
dictionary as a JSON file with the key as its name.
Args:
s3_conn: (boto.s3.connection) an s3 connection
s3_prefix: (str) the destination prefix
data_to_sync: (dict)
"""
bucket_name, prefix = split_s3_path(s3_prefix)
bucket = s3_conn.get_bucket(bucket_name)
for key, value in data_to_sync.items():
full_name = '{}/{}.json'.format(prefix, key)
s3_key = boto.s3.key.Key(
bucket=bucket,
name=full_name
)
logging.info('uploading key %s', full_name)
s3_key.set_contents_from_string(json.dumps(value))
def download(s3_conn, out_filename, s3_path):
"""Downloads the given s3_path
Args:
s3_conn (boto.s3.connection) a boto s3 connection
out_filename (str) local filename to save the file
s3_path (str) the source path on s3
"""
bucket_name, prefix = split_s3_path(s3_path)
bucket = s3_conn.get_bucket(bucket_name)
key = boto.s3.key.Key(
bucket=bucket,
name=prefix
)
logging.info('loading from %s into %s', key, out_filename)
key.get_contents_to_filename(out_filename, cb=log_download_progress)
def log_download_progress(num_bytes, obj_size):
"""Callback that boto can use to log download or upload progress"""
logging.info('%s bytes transferred out of %s total', num_bytes, obj_size)
def list_files(s3_conn, s3_path):
bucket_name, prefix = split_s3_path(s3_path)
bucket = s3_conn.get_bucket(bucket_name)
key = boto.s3.key.Key(
bucket=bucket,
name=prefix
)
files = []
for key in bucket.list(prefix=prefix):
files.append(key.name.split('/')[-1])
return list(filter(None, files))
class S3BackedJsonDict(MutableMapping):
"""A JSON-serializable dictionary that is backed by S3.
Not guaranteed to be thread or multiprocess-safe - An attempt is made before saving to merge
local changes with others that may have happened to the S3 file since this object was loaded,
but this is not atomic.
It is recommended that only one version of a given file be modified at a time.
Will periodically save, but users must call .save() before closing to save all changes.
Keyword Args:
path (string): A full s3 path, including bucket (but without the .json suffix),
used for saving the dictionary.
"""
SAVE_EVERY_N_UPDATES = 1000
def __init__(self, *args, **kw):
self.path = kw.pop('path') + '.json'
self.fs = s3fs.S3FileSystem()
if self.fs.exists(self.path):
with self.fs.open(self.path, 'rb') as f:
data = f.read().decode('utf-8') or '{}'
self._storage = json.loads(data)
else:
self._storage = dict()
self.num_updates = 0
logging.info('Loaded storage with %s keys', len(self))
def __getitem__(self, key):
return self._storage[key]
def __iter__(self):
return iter(self._storage)
def __len__(self):
return len(self._storage)
def __delitem__(self, key):
del self._storage[key]
def __setitem__(self, key, value):
self._storage[key] = value
self.num_updates += 1
if self.num_updates % self.SAVE_EVERY_N_UPDATES == 0:
logging.info('Auto-saving after %s updates', self.num_updates)
self.save()
def __keytransform__(self, key):
return key
def __contains__(self, key):
return key in self._storage
def save(self):
logging.info('Attempting to save storage of length %s to %s', len(self), self.path)
if self.fs.exists(self.path):
with self.fs.open(self.path, 'rb') as f:
saved_string = f.read().decode('utf-8') or '{}'
saved_data = json.loads(saved_string)
logging.info(
'Merging %s in-memory keys with %s stored keys. In-memory data takes priority',
len(self),
len(saved_data)
)
saved_data.update(self._storage)
self._storage = saved_data
with self.fs.open(self.path, 'wb') as f:
f.write(json.dumps(self._storage).encode('utf-8'))
|
workforce-data-initiative/skills-utils | skills_utils/s3.py | upload_dict | python | def upload_dict(s3_conn, s3_prefix, data_to_sync):
bucket_name, prefix = split_s3_path(s3_prefix)
bucket = s3_conn.get_bucket(bucket_name)
for key, value in data_to_sync.items():
full_name = '{}/{}.json'.format(prefix, key)
s3_key = boto.s3.key.Key(
bucket=bucket,
name=full_name
)
logging.info('uploading key %s', full_name)
s3_key.set_contents_from_string(json.dumps(value)) | Syncs a dictionary to an S3 bucket, serializing each value in the
dictionary as a JSON file with the key as its name.
Args:
s3_conn: (boto.s3.connection) an s3 connection
s3_prefix: (str) the destination prefix
data_to_sync: (dict) | train | https://github.com/workforce-data-initiative/skills-utils/blob/4cf9b7c2938984f34bbcc33d45482d23c52c7539/skills_utils/s3.py#L44-L63 | [
"def split_s3_path(path):\n \"\"\"\n Args:\n path: (str) an s3 path including a bucket\n (bucket_name/prefix/prefix2)\n Returns:\n A tuple containing the bucket name and full prefix)\n \"\"\"\n return path.split('/', 1)\n"
] | """
Common S3 utilities
"""
import boto
import json
import logging
import os
from collections.abc import MutableMapping
import s3fs
def split_s3_path(path):
"""
Args:
path: (str) an s3 path including a bucket
(bucket_name/prefix/prefix2)
Returns:
A tuple containing the bucket name and full prefix)
"""
return path.split('/', 1)
def upload(s3_conn, filepath, s3_path):
"""Uploads the given file to s3
Args:
s3_conn: (boto.s3.connection) an s3 connection
filepath (str) the local filename
s3_path (str) the destination path on s3
"""
bucket_name, prefix = split_s3_path(s3_path)
bucket = s3_conn.get_bucket(bucket_name)
filename = os.path.basename(filepath)
key = boto.s3.key.Key(
bucket=bucket,
name='{}/{}'.format(prefix, filename)
)
logging.info('uploading from %s to %s', filepath, key)
key.set_contents_from_filename(filepath)
def download(s3_conn, out_filename, s3_path):
"""Downloads the given s3_path
Args:
s3_conn (boto.s3.connection) a boto s3 connection
out_filename (str) local filename to save the file
s3_path (str) the source path on s3
"""
bucket_name, prefix = split_s3_path(s3_path)
bucket = s3_conn.get_bucket(bucket_name)
key = boto.s3.key.Key(
bucket=bucket,
name=prefix
)
logging.info('loading from %s into %s', key, out_filename)
key.get_contents_to_filename(out_filename, cb=log_download_progress)
def log_download_progress(num_bytes, obj_size):
"""Callback that boto can use to log download or upload progress"""
logging.info('%s bytes transferred out of %s total', num_bytes, obj_size)
def list_files(s3_conn, s3_path):
bucket_name, prefix = split_s3_path(s3_path)
bucket = s3_conn.get_bucket(bucket_name)
key = boto.s3.key.Key(
bucket=bucket,
name=prefix
)
files = []
for key in bucket.list(prefix=prefix):
files.append(key.name.split('/')[-1])
return list(filter(None, files))
class S3BackedJsonDict(MutableMapping):
"""A JSON-serializable dictionary that is backed by S3.
Not guaranteed to be thread or multiprocess-safe - An attempt is made before saving to merge
local changes with others that may have happened to the S3 file since this object was loaded,
but this is not atomic.
It is recommended that only one version of a given file be modified at a time.
Will periodically save, but users must call .save() before closing to save all changes.
Keyword Args:
path (string): A full s3 path, including bucket (but without the .json suffix),
used for saving the dictionary.
"""
SAVE_EVERY_N_UPDATES = 1000
def __init__(self, *args, **kw):
self.path = kw.pop('path') + '.json'
self.fs = s3fs.S3FileSystem()
if self.fs.exists(self.path):
with self.fs.open(self.path, 'rb') as f:
data = f.read().decode('utf-8') or '{}'
self._storage = json.loads(data)
else:
self._storage = dict()
self.num_updates = 0
logging.info('Loaded storage with %s keys', len(self))
def __getitem__(self, key):
return self._storage[key]
def __iter__(self):
return iter(self._storage)
def __len__(self):
return len(self._storage)
def __delitem__(self, key):
del self._storage[key]
def __setitem__(self, key, value):
self._storage[key] = value
self.num_updates += 1
if self.num_updates % self.SAVE_EVERY_N_UPDATES == 0:
logging.info('Auto-saving after %s updates', self.num_updates)
self.save()
def __keytransform__(self, key):
return key
def __contains__(self, key):
return key in self._storage
def save(self):
logging.info('Attempting to save storage of length %s to %s', len(self), self.path)
if self.fs.exists(self.path):
with self.fs.open(self.path, 'rb') as f:
saved_string = f.read().decode('utf-8') or '{}'
saved_data = json.loads(saved_string)
logging.info(
'Merging %s in-memory keys with %s stored keys. In-memory data takes priority',
len(self),
len(saved_data)
)
saved_data.update(self._storage)
self._storage = saved_data
with self.fs.open(self.path, 'wb') as f:
f.write(json.dumps(self._storage).encode('utf-8'))
|
workforce-data-initiative/skills-utils | skills_utils/s3.py | download | python | def download(s3_conn, out_filename, s3_path):
bucket_name, prefix = split_s3_path(s3_path)
bucket = s3_conn.get_bucket(bucket_name)
key = boto.s3.key.Key(
bucket=bucket,
name=prefix
)
logging.info('loading from %s into %s', key, out_filename)
key.get_contents_to_filename(out_filename, cb=log_download_progress) | Downloads the given s3_path
Args:
s3_conn (boto.s3.connection) a boto s3 connection
out_filename (str) local filename to save the file
s3_path (str) the source path on s3 | train | https://github.com/workforce-data-initiative/skills-utils/blob/4cf9b7c2938984f34bbcc33d45482d23c52c7539/skills_utils/s3.py#L66-L81 | [
"def split_s3_path(path):\n \"\"\"\n Args:\n path: (str) an s3 path including a bucket\n (bucket_name/prefix/prefix2)\n Returns:\n A tuple containing the bucket name and full prefix)\n \"\"\"\n return path.split('/', 1)\n"
] | """
Common S3 utilities
"""
import boto
import json
import logging
import os
from collections.abc import MutableMapping
import s3fs
def split_s3_path(path):
"""
Args:
path: (str) an s3 path including a bucket
(bucket_name/prefix/prefix2)
Returns:
A tuple containing the bucket name and full prefix)
"""
return path.split('/', 1)
def upload(s3_conn, filepath, s3_path):
"""Uploads the given file to s3
Args:
s3_conn: (boto.s3.connection) an s3 connection
filepath (str) the local filename
s3_path (str) the destination path on s3
"""
bucket_name, prefix = split_s3_path(s3_path)
bucket = s3_conn.get_bucket(bucket_name)
filename = os.path.basename(filepath)
key = boto.s3.key.Key(
bucket=bucket,
name='{}/{}'.format(prefix, filename)
)
logging.info('uploading from %s to %s', filepath, key)
key.set_contents_from_filename(filepath)
def upload_dict(s3_conn, s3_prefix, data_to_sync):
"""Syncs a dictionary to an S3 bucket, serializing each value in the
dictionary as a JSON file with the key as its name.
Args:
s3_conn: (boto.s3.connection) an s3 connection
s3_prefix: (str) the destination prefix
data_to_sync: (dict)
"""
bucket_name, prefix = split_s3_path(s3_prefix)
bucket = s3_conn.get_bucket(bucket_name)
for key, value in data_to_sync.items():
full_name = '{}/{}.json'.format(prefix, key)
s3_key = boto.s3.key.Key(
bucket=bucket,
name=full_name
)
logging.info('uploading key %s', full_name)
s3_key.set_contents_from_string(json.dumps(value))
def log_download_progress(num_bytes, obj_size):
"""Callback that boto can use to log download or upload progress"""
logging.info('%s bytes transferred out of %s total', num_bytes, obj_size)
def list_files(s3_conn, s3_path):
bucket_name, prefix = split_s3_path(s3_path)
bucket = s3_conn.get_bucket(bucket_name)
key = boto.s3.key.Key(
bucket=bucket,
name=prefix
)
files = []
for key in bucket.list(prefix=prefix):
files.append(key.name.split('/')[-1])
return list(filter(None, files))
class S3BackedJsonDict(MutableMapping):
"""A JSON-serializable dictionary that is backed by S3.
Not guaranteed to be thread or multiprocess-safe - An attempt is made before saving to merge
local changes with others that may have happened to the S3 file since this object was loaded,
but this is not atomic.
It is recommended that only one version of a given file be modified at a time.
Will periodically save, but users must call .save() before closing to save all changes.
Keyword Args:
path (string): A full s3 path, including bucket (but without the .json suffix),
used for saving the dictionary.
"""
SAVE_EVERY_N_UPDATES = 1000
def __init__(self, *args, **kw):
self.path = kw.pop('path') + '.json'
self.fs = s3fs.S3FileSystem()
if self.fs.exists(self.path):
with self.fs.open(self.path, 'rb') as f:
data = f.read().decode('utf-8') or '{}'
self._storage = json.loads(data)
else:
self._storage = dict()
self.num_updates = 0
logging.info('Loaded storage with %s keys', len(self))
def __getitem__(self, key):
return self._storage[key]
def __iter__(self):
return iter(self._storage)
def __len__(self):
return len(self._storage)
def __delitem__(self, key):
del self._storage[key]
def __setitem__(self, key, value):
self._storage[key] = value
self.num_updates += 1
if self.num_updates % self.SAVE_EVERY_N_UPDATES == 0:
logging.info('Auto-saving after %s updates', self.num_updates)
self.save()
def __keytransform__(self, key):
return key
def __contains__(self, key):
return key in self._storage
def save(self):
logging.info('Attempting to save storage of length %s to %s', len(self), self.path)
if self.fs.exists(self.path):
with self.fs.open(self.path, 'rb') as f:
saved_string = f.read().decode('utf-8') or '{}'
saved_data = json.loads(saved_string)
logging.info(
'Merging %s in-memory keys with %s stored keys. In-memory data takes priority',
len(self),
len(saved_data)
)
saved_data.update(self._storage)
self._storage = saved_data
with self.fs.open(self.path, 'wb') as f:
f.write(json.dumps(self._storage).encode('utf-8'))
|
workforce-data-initiative/skills-utils | skills_utils/iteration.py | Batch.group | python | def group(self):
yield self.current
# start enumerate at 1 because we already yielded the last saved item
for num, item in enumerate(self.iterator, 1):
self.current = item
if num == self.limit:
break
yield item
else:
self.on_going = False | Yield a group from the iterable | train | https://github.com/workforce-data-initiative/skills-utils/blob/4cf9b7c2938984f34bbcc33d45482d23c52c7539/skills_utils/iteration.py#L23-L33 | null | class Batch:
"""Yields batches (groups) from an iterable
Modified from:
http://codereview.stackexchange.com/questions/118883/split-up-an-iterable-into-batches
Args:
iterable (iterable) any iterable
limit (int) How many items to include per group
"""
def __init__(self, iterable, limit=None):
self.iterator = iter(iterable)
self.limit = limit
try:
self.current = next(self.iterator)
except StopIteration:
self.on_going = False
else:
self.on_going = True
def __iter__(self):
"""Implementation of __iter__ to allow a standard interface:
for group in Batch(iterable, 10):
do_stuff(group)
"""
while self.on_going:
yield self.group()
|
workforce-data-initiative/skills-utils | skills_utils/fs.py | cache_json | python | def cache_json(filename):
def cache_decorator(cacheable_function):
@wraps(cacheable_function)
def cache_wrapper(*args, **kwargs):
path = CACHE_DIRECTORY + filename
check_create_folder(path)
if os.path.exists(path):
with open(path) as infile:
return json.load(infile)
else:
function_output = cacheable_function(*args, **kwargs)
with open(path, 'w') as outfile:
json.dump(function_output, outfile)
return function_output
return cache_wrapper
return cache_decorator | Caches the JSON-serializable output of the function to a given file
Args:
filename (str) The filename (sans directory) to store the output
Returns: decorator, applicable to a function that produces JSON-serializable output | train | https://github.com/workforce-data-initiative/skills-utils/blob/4cf9b7c2938984f34bbcc33d45482d23c52c7539/skills_utils/fs.py#L10-L32 | null | """Filesystem-related utilities"""
from functools import wraps
import os
import json
CACHE_DIRECTORY = 'tmp/'
def check_create_folder(filename):
"""Check if the folder exisits. If not, create the folder"""
os.makedirs(os.path.dirname(filename), exist_ok=True)
|
workforce-data-initiative/skills-utils | skills_utils/fs.py | check_create_folder | python | def check_create_folder(filename):
os.makedirs(os.path.dirname(filename), exist_ok=True) | Check if the folder exisits. If not, create the folder | train | https://github.com/workforce-data-initiative/skills-utils/blob/4cf9b7c2938984f34bbcc33d45482d23c52c7539/skills_utils/fs.py#L35-L37 | null | """Filesystem-related utilities"""
from functools import wraps
import os
import json
CACHE_DIRECTORY = 'tmp/'
def cache_json(filename):
"""Caches the JSON-serializable output of the function to a given file
Args:
filename (str) The filename (sans directory) to store the output
Returns: decorator, applicable to a function that produces JSON-serializable output
"""
def cache_decorator(cacheable_function):
@wraps(cacheable_function)
def cache_wrapper(*args, **kwargs):
path = CACHE_DIRECTORY + filename
check_create_folder(path)
if os.path.exists(path):
with open(path) as infile:
return json.load(infile)
else:
function_output = cacheable_function(*args, **kwargs)
with open(path, 'w') as outfile:
json.dump(function_output, outfile)
return function_output
return cache_wrapper
return cache_decorator
|
workforce-data-initiative/skills-utils | skills_utils/job_posting_import.py | JobPostingImportBase.postings | python | def postings(self, quarter, stats_counter=None):
logging.info('Finding postings for %s', quarter)
for posting in self._iter_postings(quarter):
transformed = self._transform(posting)
transformed['id'] = '{}_{}'.format(
self.partner_id,
self._id(posting)
)
if stats_counter:
stats_counter.track(
input_document=posting,
output_document=transformed
)
yield transformed | Yield job postings in common schema format
Args:
quarter (str) The quarter, in format '2015Q1'
stats_counter (object, optional) A counter that can track both
input and output documents using a 'track' method. | train | https://github.com/workforce-data-initiative/skills-utils/blob/4cf9b7c2938984f34bbcc33d45482d23c52c7539/skills_utils/job_posting_import.py#L21-L41 | [
"def _id(self, document):\n \"\"\"Given a document, compute a source-specific id for the job posting.\n To be implemented by subclasses\n\n Args:\n document - The document, in original form\n\n Returns: (str) an id for the document\n \"\"\"\n pass\n",
"def _iter_postings(self, quarter):\n... | class JobPostingImportBase(object):
"""Base class for extracting and transforming job postings from
some source into a common schema. (http://schema.org/JobPosting)
Subclasses must implement _id, _iter_postings, and _transform.
Args:
partner_id (str) An short identifier for the partner (ie NLX, VA)
s3_conn (boto.s3.connection) an s3 connection, if needed
onet_cache (skills_ml.onet_cache.OnetCache) a wrapper for ONET data, if needed
"""
def __init__(self, partner_id, s3_conn=None, onet_cache=None):
self.partner_id = partner_id
self.s3_conn = s3_conn
self.onet_cache = onet_cache
def _id(self, document):
"""Given a document, compute a source-specific id for the job posting.
To be implemented by subclasses
Args:
document - The document, in original form
Returns: (str) an id for the document
"""
pass
def _iter_postings(self, quarter):
"""Given a quarter, yield all relevant raw job posting documents.
To be implemented by subclasses
Args:
quarter (str) The quarter, in format '2015Q1'
Yields: job posting documents in their original format
"""
pass
def _transform(self, document):
"""Given a job posting document, transform it into the common schema.
(http://schema.org/JobPosting)
To be implemented by subclasses.
Args:
document - The document, in original form
Returns: (dict) The job posting, in common schema form
"""
pass
|
workforce-data-initiative/skills-utils | skills_utils/time.py | quarter_to_daterange | python | def quarter_to_daterange(quarter):
assert len(quarter) == 6
year = int(quarter[0:4])
quarter = quarter[5]
MONTH_DAY = {
'1': ((1, 1), (3, 31)),
'2': ((4, 1), (6, 30)),
'3': ((7, 1), (9, 30)),
'4': ((10, 1), (12, 31))
}
md = MONTH_DAY[quarter]
start_md, end_md = md
return (
date(year, *start_md),
date(year, *end_md)
) | Convert a quarter in arbitrary filename-ready format (e.g. 2015Q1)
into start and end datetimes | train | https://github.com/workforce-data-initiative/skills-utils/blob/4cf9b7c2938984f34bbcc33d45482d23c52c7539/skills_utils/time.py#L6-L23 | null | """Time utilities"""
from datetime import date, timedelta
import math
def datetime_to_year_quarter(dt):
"""
Args:
dt: a datetime
Returns:
tuple of the datetime's year and quarter
"""
year = dt.year
quarter = int(math.ceil(float(dt.month)/3))
return (year, quarter)
def datetime_to_quarter(dt):
"""
Args:
dt: a datetime
Returns:
the datetime's quarter in string format (2015Q1)
"""
year, quarter = datetime_to_year_quarter(dt)
return '{}Q{}'.format(year, quarter)
def overlaps(start_one, end_one, start_two, end_two):
return start_one <= end_two and start_two <= end_one
def dates_in_range(start_date, end_date):
"""Returns all dates between two dates.
Inclusive of the start date but not the end date.
Args:
start_date (datetime.date)
end_date (datetime.date)
Returns:
(list) of datetime.date objects
"""
return [
start_date + timedelta(n)
for n in range(int((end_date - start_date).days))
]
|
workforce-data-initiative/skills-utils | skills_utils/time.py | datetime_to_year_quarter | python | def datetime_to_year_quarter(dt):
year = dt.year
quarter = int(math.ceil(float(dt.month)/3))
return (year, quarter) | Args:
dt: a datetime
Returns:
tuple of the datetime's year and quarter | train | https://github.com/workforce-data-initiative/skills-utils/blob/4cf9b7c2938984f34bbcc33d45482d23c52c7539/skills_utils/time.py#L26-L35 | null | """Time utilities"""
from datetime import date, timedelta
import math
def quarter_to_daterange(quarter):
"""Convert a quarter in arbitrary filename-ready format (e.g. 2015Q1)
into start and end datetimes"""
assert len(quarter) == 6
year = int(quarter[0:4])
quarter = quarter[5]
MONTH_DAY = {
'1': ((1, 1), (3, 31)),
'2': ((4, 1), (6, 30)),
'3': ((7, 1), (9, 30)),
'4': ((10, 1), (12, 31))
}
md = MONTH_DAY[quarter]
start_md, end_md = md
return (
date(year, *start_md),
date(year, *end_md)
)
def datetime_to_quarter(dt):
"""
Args:
dt: a datetime
Returns:
the datetime's quarter in string format (2015Q1)
"""
year, quarter = datetime_to_year_quarter(dt)
return '{}Q{}'.format(year, quarter)
def overlaps(start_one, end_one, start_two, end_two):
return start_one <= end_two and start_two <= end_one
def dates_in_range(start_date, end_date):
"""Returns all dates between two dates.
Inclusive of the start date but not the end date.
Args:
start_date (datetime.date)
end_date (datetime.date)
Returns:
(list) of datetime.date objects
"""
return [
start_date + timedelta(n)
for n in range(int((end_date - start_date).days))
]
|
workforce-data-initiative/skills-utils | skills_utils/time.py | dates_in_range | python | def dates_in_range(start_date, end_date):
return [
start_date + timedelta(n)
for n in range(int((end_date - start_date).days))
] | Returns all dates between two dates.
Inclusive of the start date but not the end date.
Args:
start_date (datetime.date)
end_date (datetime.date)
Returns:
(list) of datetime.date objects | train | https://github.com/workforce-data-initiative/skills-utils/blob/4cf9b7c2938984f34bbcc33d45482d23c52c7539/skills_utils/time.py#L53-L68 | null | """Time utilities"""
from datetime import date, timedelta
import math
def quarter_to_daterange(quarter):
"""Convert a quarter in arbitrary filename-ready format (e.g. 2015Q1)
into start and end datetimes"""
assert len(quarter) == 6
year = int(quarter[0:4])
quarter = quarter[5]
MONTH_DAY = {
'1': ((1, 1), (3, 31)),
'2': ((4, 1), (6, 30)),
'3': ((7, 1), (9, 30)),
'4': ((10, 1), (12, 31))
}
md = MONTH_DAY[quarter]
start_md, end_md = md
return (
date(year, *start_md),
date(year, *end_md)
)
def datetime_to_year_quarter(dt):
"""
Args:
dt: a datetime
Returns:
tuple of the datetime's year and quarter
"""
year = dt.year
quarter = int(math.ceil(float(dt.month)/3))
return (year, quarter)
def datetime_to_quarter(dt):
"""
Args:
dt: a datetime
Returns:
the datetime's quarter in string format (2015Q1)
"""
year, quarter = datetime_to_year_quarter(dt)
return '{}Q{}'.format(year, quarter)
def overlaps(start_one, end_one, start_two, end_two):
return start_one <= end_two and start_two <= end_one
|
workforce-data-initiative/skills-utils | skills_utils/io.py | stream_json_file | python | def stream_json_file(local_file):
for i, line in enumerate(local_file):
try:
data = json.loads(line.decode('utf-8'))
yield data
except ValueError as e:
logging.warning("Skipping line %d due to error: %s", i, e)
continue | Stream a JSON file (in JSON-per-line format)
Args:
local_file (file-like object) an open file-handle that contains a
JSON string on each line
Yields:
(dict) JSON objects | train | https://github.com/workforce-data-initiative/skills-utils/blob/4cf9b7c2938984f34bbcc33d45482d23c52c7539/skills_utils/io.py#L6-L21 | null | """I/O utilities"""
import json
import logging
|
workforce-data-initiative/skills-utils | skills_utils/es.py | basic_client | python | def basic_client():
es_connected = False
while not es_connected:
try:
ES = Elasticsearch(
hosts=[HOSTNAME]
)
es_connected = True
except TransportError as e:
logging.info('Not yet connected: %s, sleeping for 1s', e)
time.sleep(1)
return ES | Returns an Elasticsearch basic client that is responsive
to the environment variable ELASTICSEARCH_ENDPOINT | train | https://github.com/workforce-data-initiative/skills-utils/blob/4cf9b7c2938984f34bbcc33d45482d23c52c7539/skills_utils/es.py#L16-L29 | null | """Elasticsearch utilities"""
from elasticsearch import Elasticsearch, TransportError
from elasticsearch.client import IndicesClient
from elasticsearch.helpers import streaming_bulk
import contextlib
import logging
import os
import time
import uuid
HOSTNAME = os.getenv('ELASTICSEARCH_ENDPOINT', 'localhost:9200')
def indices_client():
    """Returns an Elasticsearch indices client that is responsive
    to the environment variable ELASTICSEARCH_ENDPOINT"""
    # Retry until the cluster endpoint accepts a connection.
    while True:
        try:
            base_client = Elasticsearch(
                hosts=[HOSTNAME]
            )
        except TransportError as e:
            logging.info('Not yet connected: %s, sleeping for 1s', e)
            time.sleep(1)
        else:
            return IndicesClient(base_client)
def create_index(index_name, index_config, client):
    """Create a new Elasticsearch index with the supplied configuration.

    Args:
        index_name (str): Name of the index you want to create
        index_config (dict) configuration for the index
        client (Elasticsearch.IndicesClient) the Elasticsearch client
    """
    # Thin delegation; the client performs the actual API call.
    client.create(body=index_config, index=index_name)
def get_index_from_alias(alias_name, index_client=None):
    """Retrieve the base index name from an alias

    Args:
        alias_name (str) Name of the alias
        index_client (Elasticsearch.IndicesClient) an Elasticsearch index
            client. Optional, will create one if not given

    Returns: (str) Name of index, or None when the alias does not exist
    """
    index_client = index_client or indices_client()
    if not index_client.exists_alias(name=alias_name):
        return None
    alias_map = index_client.get_alias(name=alias_name)
    # The alias response maps index name -> alias info; take the first key.
    return list(alias_map)[0]
def atomic_swap(alias_name, new_index_name, index_client):
    """Points an alias to a new index, then delete the old index if needed

    Uses client.update_aliases to perform this with zero downtime

    Args:
        alias_name (str) Name of the alias
        new_index_name (str) The new index that the alias should point to
        index_client (Elasticsearch.IndicesClient) Elasticsearch index client
    """
    logging.info('Performing atomic index alias swap')
    add_action = {'add': {'index': new_index_name, 'alias': alias_name}}
    if index_client.exists_alias(name=alias_name):
        old_index_name = get_index_from_alias(alias_name, index_client)
        logging.info('Removing old as well as adding new')
        # remove + add in one update_aliases call keeps the swap atomic
        index_client.update_aliases(body={'actions': [
            {'remove': {'index': old_index_name, 'alias': alias_name}},
            add_action
        ]})
        index_client.delete(index=old_index_name)
    else:
        logging.info('Old alias not found, only adding new')
        index_client.update_aliases(body={'actions': [add_action]})
@contextlib.contextmanager
def zero_downtime_index(index_name, index_config):
    """Context manager to create a new index based on a given alias,
    allow the caller to index it, and then point the alias to the new index

    Args:
        index_name (str) Name of an alias that should point to the new index
        index_config (dict) Configuration for the new index

    Yields: (name) The full name of the new index
    """
    client = indices_client()
    # Unique suffix so concurrent runs never collide on an index name.
    temporary_name = index_name + '_' + str(uuid.uuid4())
    logging.info('creating index with config %s', index_config)
    create_index(temporary_name, index_config, client)
    try:
        # The caller's ``with`` body indexes documents into the temp index.
        yield temporary_name
        # Success path: atomically point the alias at the new index
        # (atomic_swap also deletes the previously aliased index).
        atomic_swap(index_name, temporary_name, client)
    except Exception:
        # Failure path: log with traceback and delete the half-built index.
        # NOTE(review): the exception is suppressed here, so the caller's
        # ``with`` block never sees indexing failures -- confirm this
        # best-effort behavior is intended.
        logging.error(
            'deleting temporary index %s due to error:',
            temporary_name,
            exc_info=True
        )
        client.delete(index=temporary_name)
class ElasticsearchIndexerBase(object):
    """Base class for Elasticsearch indexers.

    Subclasses supply ``settings``, ``mappings``, ``alias_name`` and
    ``_iter_documents``; this base handles index management and bulk
    indexing with Elasticsearch.
    """

    def __init__(self, s3_conn, es_client):
        """
        Args:
            s3_conn - a boto s3 connection
            es_client - an Elasticsearch indices client
        """
        self.s3_conn = s3_conn
        self.es_client = es_client

    def index_config(self):
        """Combines setting and mapping config into a full index configuration

        Returns: dict
        """
        return dict(settings=self.settings, mappings=self.mappings)

    def replace(self):
        """Replace index with a new one

        Uses zero_downtime_index for safety and rollback
        """
        with zero_downtime_index(self.alias_name, self.index_config()) as new_index:
            self.index_all(new_index)

    def append(self):
        """Index documents onto an existing index"""
        existing_index = get_index_from_alias(self.alias_name)
        if existing_index:
            self.index_all(existing_index)
        else:
            # No index behind the alias yet; build one from scratch.
            self.replace()

    def index_all(self, index_name):
        """Index all available documents, using streaming_bulk for speed

        Args:
            index_name (string): The index
        """
        succeeded = 0
        failed = 0
        for was_ok, _item in streaming_bulk(
            self.es_client,
            self._iter_documents(index_name)
        ):
            if was_ok:
                succeeded += 1
            else:
                failed += 1
        logging.info(
            "Import results: %d ok, %d not ok",
            succeeded,
            failed
        )
|
workforce-data-initiative/skills-utils | skills_utils/es.py | create_index | python | def create_index(index_name, index_config, client):
client.create(index=index_name, body=index_config) | Creates an index with a given configuration
Args:
index_name (str): Name of the index you want to create
index_config (dict) configuration for the index
client (Elasticsearch.IndicesClient) the Elasticsearch client | train | https://github.com/workforce-data-initiative/skills-utils/blob/4cf9b7c2938984f34bbcc33d45482d23c52c7539/skills_utils/es.py#L48-L56 | null | """Elasticsearch utilities"""
from elasticsearch import Elasticsearch, TransportError
from elasticsearch.client import IndicesClient
from elasticsearch.helpers import streaming_bulk
import contextlib
import logging
import os
import time
import uuid
HOSTNAME = os.getenv('ELASTICSEARCH_ENDPOINT', 'localhost:9200')
def basic_client():
    """Returns an Elasticsearch basic client that is responsive
    to the environment variable ELASTICSEARCH_ENDPOINT"""
    # Retry until the cluster endpoint accepts a connection.
    while True:
        try:
            client = Elasticsearch(
                hosts=[HOSTNAME]
            )
        except TransportError as e:
            logging.info('Not yet connected: %s, sleeping for 1s', e)
            time.sleep(1)
        else:
            return client
def indices_client():
"""Returns an Elasticsearch indices client that is responsive
to the environment variable ELASTICSEARCH_ENDPOINT"""
es_connected = False
while not es_connected:
try:
ES = Elasticsearch(
hosts=[HOSTNAME]
)
es_connected = True
except TransportError as e:
logging.info('Not yet connected: %s, sleeping for 1s', e)
time.sleep(1)
return IndicesClient(ES)
def get_index_from_alias(alias_name, index_client=None):
"""Retrieve the base index name from an alias
Args:
alias_name (str) Name of the alias
index_client (Elasticsearch.IndicesClient) an Elasticsearch index
client. Optional, will create one if not given
Returns: (str) Name of index
"""
index_client = index_client or indices_client()
if not index_client.exists_alias(name=alias_name):
return None
return list(index_client.get_alias(name=alias_name).keys())[0]
def atomic_swap(alias_name, new_index_name, index_client):
"""Points an alias to a new index, then delete the old index if needed
Uses client.update_aliases to perform this with zero downtime
Args:
alias_name (str) Name of the alias
new_index_name (str) The new index that the alias should point to
index_client (Elasticsearch.IndicesClient) Elasticsearch index client
"""
logging.info('Performing atomic index alias swap')
if index_client.exists_alias(name=alias_name):
old_index_name = get_index_from_alias(alias_name, index_client)
logging.info('Removing old as well as adding new')
actions = {'actions': [
{'remove': {'index': old_index_name, 'alias': alias_name}},
{'add': {'index': new_index_name, 'alias': alias_name}}
]}
index_client.update_aliases(body=actions)
index_client.delete(index=old_index_name)
else:
logging.info('Old alias not found, only adding new')
actions = {'actions': [
{'add': {'index': new_index_name, 'alias': alias_name}}
]}
index_client.update_aliases(body=actions)
@contextlib.contextmanager
def zero_downtime_index(index_name, index_config):
"""Context manager to create a new index based on a given alias,
allow the caller to index it, and then point the alias to the new index
Args:
index_name (str) Name of an alias that should point to the new index
index_config (dict) Configuration for the new index
Yields: (name) The full name of the new index
"""
client = indices_client()
temporary_name = index_name + '_' + str(uuid.uuid4())
logging.info('creating index with config %s', index_config)
create_index(temporary_name, index_config, client)
try:
yield temporary_name
atomic_swap(index_name, temporary_name, client)
except Exception:
logging.error(
'deleting temporary index %s due to error:',
temporary_name,
exc_info=True
)
client.delete(index=temporary_name)
class ElasticsearchIndexerBase(object):
def __init__(self, s3_conn, es_client):
"""
Base class for Elasticsearch indexers
Subclasses implement the index setting definition and transformation of data,
The base class handles index management and bulk indexing with ES
Args:
s3_conn - a boto s3 connection
es_client - an Elasticsearch indices client
"""
self.s3_conn = s3_conn
self.es_client = es_client
def index_config(self):
"""Combines setting and mapping config into a full index configuration
Returns: dict
"""
return {
'settings': self.settings,
'mappings': self.mappings
}
def replace(self):
"""Replace index with a new one
zero_downtime_index for safety and rollback
"""
with zero_downtime_index(self.alias_name, self.index_config()) as target_index:
self.index_all(target_index)
def append(self):
"""Index documents onto an existing index"""
target_index = get_index_from_alias(self.alias_name)
if not target_index:
self.replace()
else:
self.index_all(target_index)
def index_all(self, index_name):
"""Index all available documents, using streaming_bulk for speed
Args:
index_name (string): The index
"""
oks = 0
notoks = 0
for ok, item in streaming_bulk(
self.es_client,
self._iter_documents(index_name)
):
if ok:
oks += 1
else:
notoks += 1
logging.info(
"Import results: %d ok, %d not ok",
oks,
notoks
)
|
workforce-data-initiative/skills-utils | skills_utils/es.py | get_index_from_alias | python | def get_index_from_alias(alias_name, index_client=None):
index_client = index_client or indices_client()
if not index_client.exists_alias(name=alias_name):
return None
return list(index_client.get_alias(name=alias_name).keys())[0] | Retrieve the base index name from an alias
Args:
alias_name (str) Name of the alias
index_client (Elasticsearch.IndicesClient) an Elasticsearch index
client. Optional, will create one if not given
Returns: (str) Name of index | train | https://github.com/workforce-data-initiative/skills-utils/blob/4cf9b7c2938984f34bbcc33d45482d23c52c7539/skills_utils/es.py#L59-L72 | [
"def indices_client():\n \"\"\"Returns an Elasticsearch indices client that is responsive\n to the environment variable ELASTICSEARCH_ENDPOINT\"\"\"\n es_connected = False\n while not es_connected:\n try:\n ES = Elasticsearch(\n hosts=[HOSTNAME]\n )\n ... | """Elasticsearch utilities"""
from elasticsearch import Elasticsearch, TransportError
from elasticsearch.client import IndicesClient
from elasticsearch.helpers import streaming_bulk
import contextlib
import logging
import os
import time
import uuid
HOSTNAME = os.getenv('ELASTICSEARCH_ENDPOINT', 'localhost:9200')
def basic_client():
"""Returns an Elasticsearch basic client that is responsive
to the environment variable ELASTICSEARCH_ENDPOINT"""
es_connected = False
while not es_connected:
try:
ES = Elasticsearch(
hosts=[HOSTNAME]
)
es_connected = True
except TransportError as e:
logging.info('Not yet connected: %s, sleeping for 1s', e)
time.sleep(1)
return ES
def indices_client():
"""Returns an Elasticsearch indices client that is responsive
to the environment variable ELASTICSEARCH_ENDPOINT"""
es_connected = False
while not es_connected:
try:
ES = Elasticsearch(
hosts=[HOSTNAME]
)
es_connected = True
except TransportError as e:
logging.info('Not yet connected: %s, sleeping for 1s', e)
time.sleep(1)
return IndicesClient(ES)
def create_index(index_name, index_config, client):
"""Creates an index with a given configuration
Args:
index_name (str): Name of the index you want to create
index_config (dict) configuration for the index
client (Elasticsearch.IndicesClient) the Elasticsearch client
"""
client.create(index=index_name, body=index_config)
def atomic_swap(alias_name, new_index_name, index_client):
"""Points an alias to a new index, then delete the old index if needed
Uses client.update_aliases to perform this with zero downtime
Args:
alias_name (str) Name of the alias
new_index_name (str) The new index that the alias should point to
index_client (Elasticsearch.IndicesClient) Elasticsearch index client
"""
logging.info('Performing atomic index alias swap')
if index_client.exists_alias(name=alias_name):
old_index_name = get_index_from_alias(alias_name, index_client)
logging.info('Removing old as well as adding new')
actions = {'actions': [
{'remove': {'index': old_index_name, 'alias': alias_name}},
{'add': {'index': new_index_name, 'alias': alias_name}}
]}
index_client.update_aliases(body=actions)
index_client.delete(index=old_index_name)
else:
logging.info('Old alias not found, only adding new')
actions = {'actions': [
{'add': {'index': new_index_name, 'alias': alias_name}}
]}
index_client.update_aliases(body=actions)
@contextlib.contextmanager
def zero_downtime_index(index_name, index_config):
"""Context manager to create a new index based on a given alias,
allow the caller to index it, and then point the alias to the new index
Args:
index_name (str) Name of an alias that should point to the new index
index_config (dict) Configuration for the new index
Yields: (name) The full name of the new index
"""
client = indices_client()
temporary_name = index_name + '_' + str(uuid.uuid4())
logging.info('creating index with config %s', index_config)
create_index(temporary_name, index_config, client)
try:
yield temporary_name
atomic_swap(index_name, temporary_name, client)
except Exception:
logging.error(
'deleting temporary index %s due to error:',
temporary_name,
exc_info=True
)
client.delete(index=temporary_name)
class ElasticsearchIndexerBase(object):
def __init__(self, s3_conn, es_client):
"""
Base class for Elasticsearch indexers
Subclasses implement the index setting definition and transformation of data,
The base class handles index management and bulk indexing with ES
Args:
s3_conn - a boto s3 connection
es_client - an Elasticsearch indices client
"""
self.s3_conn = s3_conn
self.es_client = es_client
def index_config(self):
"""Combines setting and mapping config into a full index configuration
Returns: dict
"""
return {
'settings': self.settings,
'mappings': self.mappings
}
def replace(self):
"""Replace index with a new one
zero_downtime_index for safety and rollback
"""
with zero_downtime_index(self.alias_name, self.index_config()) as target_index:
self.index_all(target_index)
def append(self):
"""Index documents onto an existing index"""
target_index = get_index_from_alias(self.alias_name)
if not target_index:
self.replace()
else:
self.index_all(target_index)
def index_all(self, index_name):
"""Index all available documents, using streaming_bulk for speed
Args:
index_name (string): The index
"""
oks = 0
notoks = 0
for ok, item in streaming_bulk(
self.es_client,
self._iter_documents(index_name)
):
if ok:
oks += 1
else:
notoks += 1
logging.info(
"Import results: %d ok, %d not ok",
oks,
notoks
)
|
workforce-data-initiative/skills-utils | skills_utils/es.py | atomic_swap | python | def atomic_swap(alias_name, new_index_name, index_client):
logging.info('Performing atomic index alias swap')
if index_client.exists_alias(name=alias_name):
old_index_name = get_index_from_alias(alias_name, index_client)
logging.info('Removing old as well as adding new')
actions = {'actions': [
{'remove': {'index': old_index_name, 'alias': alias_name}},
{'add': {'index': new_index_name, 'alias': alias_name}}
]}
index_client.update_aliases(body=actions)
index_client.delete(index=old_index_name)
else:
logging.info('Old alias not found, only adding new')
actions = {'actions': [
{'add': {'index': new_index_name, 'alias': alias_name}}
]}
index_client.update_aliases(body=actions) | Points an alias to a new index, then delete the old index if needed
Uses client.update_aliases to perform this with zero downtime
Args:
alias_name (str) Name of the alias
new_index_name (str) The new index that the alias should point to
index_client (Elasticsearch.IndicesClient) Elasticsearch index client | train | https://github.com/workforce-data-initiative/skills-utils/blob/4cf9b7c2938984f34bbcc33d45482d23c52c7539/skills_utils/es.py#L75-L100 | [
"def get_index_from_alias(alias_name, index_client=None):\n \"\"\"Retrieve the base index name from an alias\n\n Args:\n alias_name (str) Name of the alias\n index_client (Elasticsearch.IndicesClient) an Elasticsearch index\n client. Optional, will create one if not given\n\n Retur... | """Elasticsearch utilities"""
from elasticsearch import Elasticsearch, TransportError
from elasticsearch.client import IndicesClient
from elasticsearch.helpers import streaming_bulk
import contextlib
import logging
import os
import time
import uuid
HOSTNAME = os.getenv('ELASTICSEARCH_ENDPOINT', 'localhost:9200')
def basic_client():
"""Returns an Elasticsearch basic client that is responsive
to the environment variable ELASTICSEARCH_ENDPOINT"""
es_connected = False
while not es_connected:
try:
ES = Elasticsearch(
hosts=[HOSTNAME]
)
es_connected = True
except TransportError as e:
logging.info('Not yet connected: %s, sleeping for 1s', e)
time.sleep(1)
return ES
def indices_client():
"""Returns an Elasticsearch indices client that is responsive
to the environment variable ELASTICSEARCH_ENDPOINT"""
es_connected = False
while not es_connected:
try:
ES = Elasticsearch(
hosts=[HOSTNAME]
)
es_connected = True
except TransportError as e:
logging.info('Not yet connected: %s, sleeping for 1s', e)
time.sleep(1)
return IndicesClient(ES)
def create_index(index_name, index_config, client):
"""Creates an index with a given configuration
Args:
index_name (str): Name of the index you want to create
index_config (dict) configuration for the index
client (Elasticsearch.IndicesClient) the Elasticsearch client
"""
client.create(index=index_name, body=index_config)
def get_index_from_alias(alias_name, index_client=None):
"""Retrieve the base index name from an alias
Args:
alias_name (str) Name of the alias
index_client (Elasticsearch.IndicesClient) an Elasticsearch index
client. Optional, will create one if not given
Returns: (str) Name of index
"""
index_client = index_client or indices_client()
if not index_client.exists_alias(name=alias_name):
return None
return list(index_client.get_alias(name=alias_name).keys())[0]
@contextlib.contextmanager
def zero_downtime_index(index_name, index_config):
"""Context manager to create a new index based on a given alias,
allow the caller to index it, and then point the alias to the new index
Args:
index_name (str) Name of an alias that should point to the new index
index_config (dict) Configuration for the new index
Yields: (name) The full name of the new index
"""
client = indices_client()
temporary_name = index_name + '_' + str(uuid.uuid4())
logging.info('creating index with config %s', index_config)
create_index(temporary_name, index_config, client)
try:
yield temporary_name
atomic_swap(index_name, temporary_name, client)
except Exception:
logging.error(
'deleting temporary index %s due to error:',
temporary_name,
exc_info=True
)
client.delete(index=temporary_name)
class ElasticsearchIndexerBase(object):
def __init__(self, s3_conn, es_client):
"""
Base class for Elasticsearch indexers
Subclasses implement the index setting definition and transformation of data,
The base class handles index management and bulk indexing with ES
Args:
s3_conn - a boto s3 connection
es_client - an Elasticsearch indices client
"""
self.s3_conn = s3_conn
self.es_client = es_client
def index_config(self):
"""Combines setting and mapping config into a full index configuration
Returns: dict
"""
return {
'settings': self.settings,
'mappings': self.mappings
}
def replace(self):
"""Replace index with a new one
zero_downtime_index for safety and rollback
"""
with zero_downtime_index(self.alias_name, self.index_config()) as target_index:
self.index_all(target_index)
def append(self):
"""Index documents onto an existing index"""
target_index = get_index_from_alias(self.alias_name)
if not target_index:
self.replace()
else:
self.index_all(target_index)
def index_all(self, index_name):
"""Index all available documents, using streaming_bulk for speed
Args:
index_name (string): The index
"""
oks = 0
notoks = 0
for ok, item in streaming_bulk(
self.es_client,
self._iter_documents(index_name)
):
if ok:
oks += 1
else:
notoks += 1
logging.info(
"Import results: %d ok, %d not ok",
oks,
notoks
)
|
workforce-data-initiative/skills-utils | skills_utils/es.py | zero_downtime_index | python | def zero_downtime_index(index_name, index_config):
client = indices_client()
temporary_name = index_name + '_' + str(uuid.uuid4())
logging.info('creating index with config %s', index_config)
create_index(temporary_name, index_config, client)
try:
yield temporary_name
atomic_swap(index_name, temporary_name, client)
except Exception:
logging.error(
'deleting temporary index %s due to error:',
temporary_name,
exc_info=True
)
client.delete(index=temporary_name) | Context manager to create a new index based on a given alias,
allow the caller to index it, and then point the alias to the new index
Args:
index_name (str) Name of an alias that should point to the new index
index_config (dict) Configuration for the new index
Yields: (name) The full name of the new index | train | https://github.com/workforce-data-initiative/skills-utils/blob/4cf9b7c2938984f34bbcc33d45482d23c52c7539/skills_utils/es.py#L104-L127 | [
"def indices_client():\n \"\"\"Returns an Elasticsearch indices client that is responsive\n to the environment variable ELASTICSEARCH_ENDPOINT\"\"\"\n es_connected = False\n while not es_connected:\n try:\n ES = Elasticsearch(\n hosts=[HOSTNAME]\n )\n ... | """Elasticsearch utilities"""
from elasticsearch import Elasticsearch, TransportError
from elasticsearch.client import IndicesClient
from elasticsearch.helpers import streaming_bulk
import contextlib
import logging
import os
import time
import uuid
HOSTNAME = os.getenv('ELASTICSEARCH_ENDPOINT', 'localhost:9200')
def basic_client():
"""Returns an Elasticsearch basic client that is responsive
to the environment variable ELASTICSEARCH_ENDPOINT"""
es_connected = False
while not es_connected:
try:
ES = Elasticsearch(
hosts=[HOSTNAME]
)
es_connected = True
except TransportError as e:
logging.info('Not yet connected: %s, sleeping for 1s', e)
time.sleep(1)
return ES
def indices_client():
"""Returns an Elasticsearch indices client that is responsive
to the environment variable ELASTICSEARCH_ENDPOINT"""
es_connected = False
while not es_connected:
try:
ES = Elasticsearch(
hosts=[HOSTNAME]
)
es_connected = True
except TransportError as e:
logging.info('Not yet connected: %s, sleeping for 1s', e)
time.sleep(1)
return IndicesClient(ES)
def create_index(index_name, index_config, client):
"""Creates an index with a given configuration
Args:
index_name (str): Name of the index you want to create
index_config (dict) configuration for the index
client (Elasticsearch.IndicesClient) the Elasticsearch client
"""
client.create(index=index_name, body=index_config)
def get_index_from_alias(alias_name, index_client=None):
"""Retrieve the base index name from an alias
Args:
alias_name (str) Name of the alias
index_client (Elasticsearch.IndicesClient) an Elasticsearch index
client. Optional, will create one if not given
Returns: (str) Name of index
"""
index_client = index_client or indices_client()
if not index_client.exists_alias(name=alias_name):
return None
return list(index_client.get_alias(name=alias_name).keys())[0]
def atomic_swap(alias_name, new_index_name, index_client):
"""Points an alias to a new index, then delete the old index if needed
Uses client.update_aliases to perform this with zero downtime
Args:
alias_name (str) Name of the alias
new_index_name (str) The new index that the alias should point to
index_client (Elasticsearch.IndicesClient) Elasticsearch index client
"""
logging.info('Performing atomic index alias swap')
if index_client.exists_alias(name=alias_name):
old_index_name = get_index_from_alias(alias_name, index_client)
logging.info('Removing old as well as adding new')
actions = {'actions': [
{'remove': {'index': old_index_name, 'alias': alias_name}},
{'add': {'index': new_index_name, 'alias': alias_name}}
]}
index_client.update_aliases(body=actions)
index_client.delete(index=old_index_name)
else:
logging.info('Old alias not found, only adding new')
actions = {'actions': [
{'add': {'index': new_index_name, 'alias': alias_name}}
]}
index_client.update_aliases(body=actions)
@contextlib.contextmanager
class ElasticsearchIndexerBase(object):
def __init__(self, s3_conn, es_client):
"""
Base class for Elasticsearch indexers
Subclasses implement the index setting definition and transformation of data,
The base class handles index management and bulk indexing with ES
Args:
s3_conn - a boto s3 connection
es_client - an Elasticsearch indices client
"""
self.s3_conn = s3_conn
self.es_client = es_client
def index_config(self):
"""Combines setting and mapping config into a full index configuration
Returns: dict
"""
return {
'settings': self.settings,
'mappings': self.mappings
}
def replace(self):
"""Replace index with a new one
zero_downtime_index for safety and rollback
"""
with zero_downtime_index(self.alias_name, self.index_config()) as target_index:
self.index_all(target_index)
def append(self):
"""Index documents onto an existing index"""
target_index = get_index_from_alias(self.alias_name)
if not target_index:
self.replace()
else:
self.index_all(target_index)
def index_all(self, index_name):
"""Index all available documents, using streaming_bulk for speed
Args:
index_name (string): The index
"""
oks = 0
notoks = 0
for ok, item in streaming_bulk(
self.es_client,
self._iter_documents(index_name)
):
if ok:
oks += 1
else:
notoks += 1
logging.info(
"Import results: %d ok, %d not ok",
oks,
notoks
)
|
workforce-data-initiative/skills-utils | skills_utils/es.py | ElasticsearchIndexerBase.replace | python | def replace(self):
with zero_downtime_index(self.alias_name, self.index_config()) as target_index:
self.index_all(target_index) | Replace index with a new one
zero_downtime_index for safety and rollback | train | https://github.com/workforce-data-initiative/skills-utils/blob/4cf9b7c2938984f34bbcc33d45482d23c52c7539/skills_utils/es.py#L154-L159 | [
"def index_config(self):\n \"\"\"Combines setting and mapping config into a full index configuration\n Returns: dict\n \"\"\"\n return {\n 'settings': self.settings,\n 'mappings': self.mappings\n }\n",
"def index_all(self, index_name):\n \"\"\"Index all available documents, using s... | class ElasticsearchIndexerBase(object):
def __init__(self, s3_conn, es_client):
"""
Base class for Elasticsearch indexers
Subclasses implement the index setting definition and transformation of data,
The base class handles index management and bulk indexing with ES
Args:
s3_conn - a boto s3 connection
es_client - an Elasticsearch indices client
"""
self.s3_conn = s3_conn
self.es_client = es_client
def index_config(self):
"""Combines setting and mapping config into a full index configuration
Returns: dict
"""
return {
'settings': self.settings,
'mappings': self.mappings
}
def append(self):
"""Index documents onto an existing index"""
target_index = get_index_from_alias(self.alias_name)
if not target_index:
self.replace()
else:
self.index_all(target_index)
def index_all(self, index_name):
"""Index all available documents, using streaming_bulk for speed
Args:
index_name (string): The index
"""
oks = 0
notoks = 0
for ok, item in streaming_bulk(
self.es_client,
self._iter_documents(index_name)
):
if ok:
oks += 1
else:
notoks += 1
logging.info(
"Import results: %d ok, %d not ok",
oks,
notoks
)
|
workforce-data-initiative/skills-utils | skills_utils/es.py | ElasticsearchIndexerBase.append | python | def append(self):
target_index = get_index_from_alias(self.alias_name)
if not target_index:
self.replace()
else:
self.index_all(target_index) | Index documents onto an existing index | train | https://github.com/workforce-data-initiative/skills-utils/blob/4cf9b7c2938984f34bbcc33d45482d23c52c7539/skills_utils/es.py#L161-L167 | [
"def get_index_from_alias(alias_name, index_client=None):\n \"\"\"Retrieve the base index name from an alias\n\n Args:\n alias_name (str) Name of the alias\n index_client (Elasticsearch.IndicesClient) an Elasticsearch index\n client. Optional, will create one if not given\n\n Retur... | class ElasticsearchIndexerBase(object):
def __init__(self, s3_conn, es_client):
"""
Base class for Elasticsearch indexers
Subclasses implement the index setting definition and transformation of data,
The base class handles index management and bulk indexing with ES
Args:
s3_conn - a boto s3 connection
es_client - an Elasticsearch indices client
"""
self.s3_conn = s3_conn
self.es_client = es_client
def index_config(self):
"""Combines setting and mapping config into a full index configuration
Returns: dict
"""
return {
'settings': self.settings,
'mappings': self.mappings
}
def replace(self):
"""Replace index with a new one
zero_downtime_index for safety and rollback
"""
with zero_downtime_index(self.alias_name, self.index_config()) as target_index:
self.index_all(target_index)
def index_all(self, index_name):
"""Index all available documents, using streaming_bulk for speed
Args:
index_name (string): The index
"""
oks = 0
notoks = 0
for ok, item in streaming_bulk(
self.es_client,
self._iter_documents(index_name)
):
if ok:
oks += 1
else:
notoks += 1
logging.info(
"Import results: %d ok, %d not ok",
oks,
notoks
)
|
workforce-data-initiative/skills-utils | skills_utils/es.py | ElasticsearchIndexerBase.index_all | python | def index_all(self, index_name):
oks = 0
notoks = 0
for ok, item in streaming_bulk(
self.es_client,
self._iter_documents(index_name)
):
if ok:
oks += 1
else:
notoks += 1
logging.info(
"Import results: %d ok, %d not ok",
oks,
notoks
) | Index all available documents, using streaming_bulk for speed
Args:
index_name (string): The index | train | https://github.com/workforce-data-initiative/skills-utils/blob/4cf9b7c2938984f34bbcc33d45482d23c52c7539/skills_utils/es.py#L169-L189 | null | class ElasticsearchIndexerBase(object):
def __init__(self, s3_conn, es_client):
"""
Base class for Elasticsearch indexers
Subclasses implement the index setting definition and transformation of data,
The base class handles index management and bulk indexing with ES
Args:
s3_conn - a boto s3 connection
es_client - an Elasticsearch indices client
"""
self.s3_conn = s3_conn
self.es_client = es_client
def index_config(self):
"""Combines setting and mapping config into a full index configuration
Returns: dict
"""
return {
'settings': self.settings,
'mappings': self.mappings
}
def replace(self):
"""Replace index with a new one
zero_downtime_index for safety and rollback
"""
with zero_downtime_index(self.alias_name, self.index_config()) as target_index:
self.index_all(target_index)
def append(self):
    """Index documents onto an existing index.

    Falls back to a full ``replace`` when the alias does not resolve to
    any index yet.
    """
    target_index = get_index_from_alias(self.alias_name)
    if target_index:
        self.index_all(target_index)
    else:
        self.replace()
|
inveniosoftware/invenio-previewer | invenio_previewer/extensions/xml_prismjs.py | render | python | def render(file):
with file.open() as fp:
encoding = detect_encoding(fp, default='utf-8')
file_content = fp.read().decode(encoding)
parsed_xml = xml.dom.minidom.parseString(file_content)
return parsed_xml.toprettyxml(indent=' ', newl='') | Pretty print the XML file for rendering. | train | https://github.com/inveniosoftware/invenio-previewer/blob/558fd22e0f29cc8cd7a6999abd4febcf6b248c49/invenio_previewer/extensions/xml_prismjs.py#L22-L28 | [
"def detect_encoding(fp, default=None):\n \"\"\"Detect the cahracter encoding of a file.\n\n :param fp: Open Python file pointer.\n :param default: Fallback encoding to use.\n :returns: The detected encoding.\n\n .. note:: The file pointer is returned at its original read position.\n \"\"\"\n i... | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016-2019 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Previews an XML file."""
from __future__ import absolute_import, print_function
import xml.dom.minidom
from flask import current_app, render_template
from ..utils import detect_encoding
previewable_extensions = ['xml']
def validate_xml(file):
    """Validate an XML file.

    Rejects files larger than ``PREVIEWER_MAX_FILE_SIZE_BYTES`` (default
    1 MiB) and files that do not parse as UTF-8 encoded XML.

    :param file: PreviewFile-like object exposing ``size`` and ``open()``.
    :returns: ``True`` if the file is previewable XML, else ``False``.
    """
    max_file_size = current_app.config.get(
        'PREVIEWER_MAX_FILE_SIZE_BYTES', 1 * 1024 * 1024)
    if file.size > max_file_size:
        return False
    with file.open() as fp:
        try:
            content = fp.read().decode('utf-8')
            xml.dom.minidom.parseString(content)
            return True
        except Exception:
            # A bare ``except:`` would also trap KeyboardInterrupt and
            # SystemExit; Exception keeps the best-effort contract safe.
            return False
def can_preview(file):
    """Determine if the given file can be previewed."""
    previewable = file.is_local()
    if previewable:
        previewable = file.has_extensions('.xml')
    if previewable:
        previewable = validate_xml(file)
    return previewable
def preview(file):
    """Render the appropriate template with the highlighted XML content."""
    template_ctx = {
        'file': file,
        'content': render(file),
        'js_bundles': ['previewer_prism_js'],
        'css_bundles': ['previewer_prism_css'],
    }
    return render_template('invenio_previewer/xml_prismjs.html', **template_ctx)
|
inveniosoftware/invenio-previewer | invenio_previewer/extensions/xml_prismjs.py | validate_xml | python | def validate_xml(file):
max_file_size = current_app.config.get(
'PREVIEWER_MAX_FILE_SIZE_BYTES', 1 * 1024 * 1024)
if file.size > max_file_size:
return False
with file.open() as fp:
try:
content = fp.read().decode('utf-8')
xml.dom.minidom.parseString(content)
return True
except:
return False | Validate an XML file. | train | https://github.com/inveniosoftware/invenio-previewer/blob/558fd22e0f29cc8cd7a6999abd4febcf6b248c49/invenio_previewer/extensions/xml_prismjs.py#L31-L44 | null | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016-2019 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Previews an XML file."""
from __future__ import absolute_import, print_function
import xml.dom.minidom
from flask import current_app, render_template
from ..utils import detect_encoding
previewable_extensions = ['xml']
def render(file):
    """Pretty print the XML file for rendering."""
    with file.open() as fp:
        charset = detect_encoding(fp, default='utf-8')
        raw_text = fp.read().decode(charset)
        document = xml.dom.minidom.parseString(raw_text)
        return document.toprettyxml(indent=' ', newl='')
def can_preview(file):
"""Determine if the given file can be previewed."""
return (file.is_local() and
file.has_extensions('.xml') and
validate_xml(file))
def preview(file):
"""Render appropiate template with embed flag."""
return render_template(
'invenio_previewer/xml_prismjs.html',
file=file,
content=render(file),
js_bundles=['previewer_prism_js'],
css_bundles=['previewer_prism_css'],
)
|
inveniosoftware/invenio-previewer | invenio_previewer/extensions/xml_prismjs.py | preview | python | def preview(file):
return render_template(
'invenio_previewer/xml_prismjs.html',
file=file,
content=render(file),
js_bundles=['previewer_prism_js'],
css_bundles=['previewer_prism_css'],
) | Render appropriate template with embed flag.
"def render(file):\n \"\"\"Pretty print the XML file for rendering.\"\"\"\n with file.open() as fp:\n encoding = detect_encoding(fp, default='utf-8')\n file_content = fp.read().decode(encoding)\n parsed_xml = xml.dom.minidom.parseString(file_content)\n return parsed_xml.toprettyxml... | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016-2019 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Previews an XML file."""
from __future__ import absolute_import, print_function
import xml.dom.minidom
from flask import current_app, render_template
from ..utils import detect_encoding
previewable_extensions = ['xml']
def render(file):
"""Pretty print the XML file for rendering."""
with file.open() as fp:
encoding = detect_encoding(fp, default='utf-8')
file_content = fp.read().decode(encoding)
parsed_xml = xml.dom.minidom.parseString(file_content)
return parsed_xml.toprettyxml(indent=' ', newl='')
def validate_xml(file):
"""Validate an XML file."""
max_file_size = current_app.config.get(
'PREVIEWER_MAX_FILE_SIZE_BYTES', 1 * 1024 * 1024)
if file.size > max_file_size:
return False
with file.open() as fp:
try:
content = fp.read().decode('utf-8')
xml.dom.minidom.parseString(content)
return True
except:
return False
def can_preview(file):
"""Determine if the given file can be previewed."""
return (file.is_local() and
file.has_extensions('.xml') and
validate_xml(file))
|
inveniosoftware/invenio-previewer | invenio_previewer/extensions/ipynb.py | render | python | def render(file):
fp = file.open()
content = fp.read()
fp.close()
notebook = nbformat.reads(content.decode('utf-8'), as_version=4)
html_exporter = HTMLExporter()
html_exporter.template_file = 'basic'
(body, resources) = html_exporter.from_notebook_node(notebook)
return body, resources | Generate the result HTML. | train | https://github.com/inveniosoftware/invenio-previewer/blob/558fd22e0f29cc8cd7a6999abd4febcf6b248c49/invenio_previewer/extensions/ipynb.py#L18-L29 | null | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016-2019 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""IPython notebooks previewer."""
from __future__ import absolute_import, unicode_literals
import nbformat
from flask import render_template
from nbconvert import HTMLExporter
def can_preview(file):
"""Determine if file can be previewed."""
return file.is_local() and file.has_extensions('.ipynb')
def preview(file):
    """Render the IPython Notebook."""
    html_body, resources = render(file)
    # The second inlined stylesheet is nbconvert's default notebook style.
    template_args = {
        'file': file,
        'content': html_body,
        'style': resources['inlining']['css'][1],
    }
    return render_template('invenio_previewer/ipynb.html', **template_args)
|
inveniosoftware/invenio-previewer | invenio_previewer/extensions/ipynb.py | preview | python | def preview(file):
body, resources = render(file)
default_ipython_style = resources['inlining']['css'][1]
return render_template(
'invenio_previewer/ipynb.html',
file=file,
content=body,
style=default_ipython_style
) | Render the IPython Notebook. | train | https://github.com/inveniosoftware/invenio-previewer/blob/558fd22e0f29cc8cd7a6999abd4febcf6b248c49/invenio_previewer/extensions/ipynb.py#L37-L46 | [
"def render(file):\n \"\"\"Generate the result HTML.\"\"\"\n fp = file.open()\n content = fp.read()\n fp.close()\n\n notebook = nbformat.reads(content.decode('utf-8'), as_version=4)\n\n html_exporter = HTMLExporter()\n html_exporter.template_file = 'basic'\n (body, resources) = html_exporter... | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016-2019 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""IPython notebooks previewer."""
from __future__ import absolute_import, unicode_literals
import nbformat
from flask import render_template
from nbconvert import HTMLExporter
def render(file):
    """Generate the result HTML.

    :param file: PreviewFile-like object exposing ``open()``.
    :returns: ``(body, resources)`` pair from nbconvert's HTML exporter.
    """
    # Context manager guarantees the handle is released even if read()
    # raises; the previous open/read/close sequence leaked on error.
    with file.open() as fp:
        content = fp.read()
    notebook = nbformat.reads(content.decode('utf-8'), as_version=4)
    html_exporter = HTMLExporter()
    html_exporter.template_file = 'basic'
    (body, resources) = html_exporter.from_notebook_node(notebook)
    return body, resources
def can_preview(file):
    """Determine if file can be previewed."""
    previewable = file.is_local()
    if previewable:
        previewable = file.has_extensions('.ipynb')
    return previewable
|
inveniosoftware/invenio-previewer | invenio_previewer/api.py | PreviewFile.uri | python | def uri(self):
return url_for(
'.{0}_files'.format(self.pid.pid_type),
pid_value=self.pid.pid_value,
filename=self.file.key) | Get file download link.
.. note::
The URI generation assumes that you can download the file using the
view ``invenio_records_ui.<pid_type>_files``. | train | https://github.com/inveniosoftware/invenio-previewer/blob/558fd22e0f29cc8cd7a6999abd4febcf6b248c49/invenio_previewer/api.py#L46-L57 | null | class PreviewFile(object):
"""Preview file default implementation."""
def __init__(self, pid, record, fileobj):
"""Default constructor.
:param file: ObjectVersion instance from Invenio-Files-REST.
"""
self.file = fileobj
self.pid = pid
self.record = record
@property
def size(self):
"""Get file size."""
return self.file['size']
@property
def filename(self):
"""Get filename."""
return basename(self.file.key)
@property
def bucket(self):
"""Get bucket."""
return self.file.bucket_id
def is_local(self):
    """Check if file is local.

    Always ``True`` in this default implementation.
    """
    # NOTE: keep this a plain method — every previewer extension calls
    # ``file.is_local()``. The stray ``@property`` decorator made that
    # call a ``bool`` invocation (TypeError); the sibling definition of
    # this class also declares it without the decorator.
    return True
def has_extensions(self, *exts):
"""Check if file has one of the extensions."""
file_ext = splitext(self.filename)[1]
file_ext = file_ext.lower()
for e in exts:
if file_ext == e:
return True
return False
def open(self):
"""Open the file."""
return self.file.file.storage().open()
|
inveniosoftware/invenio-previewer | invenio_previewer/api.py | PreviewFile.has_extensions | python | def has_extensions(self, *exts):
file_ext = splitext(self.filename)[1]
file_ext = file_ext.lower()
for e in exts:
if file_ext == e:
return True
return False | Check if file has one of the extensions. | train | https://github.com/inveniosoftware/invenio-previewer/blob/558fd22e0f29cc8cd7a6999abd4febcf6b248c49/invenio_previewer/api.py#L63-L71 | null | class PreviewFile(object):
"""Preview file default implementation."""
def __init__(self, pid, record, fileobj):
"""Default constructor.
:param file: ObjectVersion instance from Invenio-Files-REST.
"""
self.file = fileobj
self.pid = pid
self.record = record
@property
def size(self):
"""Get file size."""
return self.file['size']
@property
def filename(self):
"""Get filename."""
return basename(self.file.key)
@property
def bucket(self):
"""Get bucket."""
return self.file.bucket_id
@property
def uri(self):
    """Get file download link.

    .. note::

        The URI generation assumes that you can download the file using
        the view ``invenio_records_ui.<pid_type>_files``.
    """
    endpoint = '.{0}_files'.format(self.pid.pid_type)
    return url_for(
        endpoint,
        pid_value=self.pid.pid_value,
        filename=self.file.key)
def is_local(self):
"""Check if file is local."""
return True
def open(self):
"""Open the file."""
return self.file.file.storage().open()
|
inveniosoftware/invenio-previewer | invenio_previewer/extensions/json_prismjs.py | render | python | def render(file):
with file.open() as fp:
encoding = detect_encoding(fp, default='utf-8')
file_content = fp.read().decode(encoding)
json_data = json.loads(file_content, object_pairs_hook=OrderedDict)
return json.dumps(json_data, indent=4, separators=(',', ': ')) | Pretty print the JSON file for rendering. | train | https://github.com/inveniosoftware/invenio-previewer/blob/558fd22e0f29cc8cd7a6999abd4febcf6b248c49/invenio_previewer/extensions/json_prismjs.py#L23-L29 | [
"def detect_encoding(fp, default=None):\n \"\"\"Detect the cahracter encoding of a file.\n\n :param fp: Open Python file pointer.\n :param default: Fallback encoding to use.\n :returns: The detected encoding.\n\n .. note:: The file pointer is returned at its original read position.\n \"\"\"\n i... | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016-2019 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Previews a JSON file."""
from __future__ import absolute_import, print_function
import json
from collections import OrderedDict
from flask import current_app, render_template
from ..utils import detect_encoding
previewable_extensions = ['json']
def validate_json(file):
    """Validate a JSON file.

    Rejects files larger than ``PREVIEWER_MAX_FILE_SIZE_BYTES`` (default
    1 MiB) and files that do not decode as UTF-8 JSON.

    :param file: PreviewFile-like object exposing ``size`` and ``open()``.
    :returns: ``True`` if the file is previewable JSON, else ``False``.
    """
    max_file_size = current_app.config.get(
        'PREVIEWER_MAX_FILE_SIZE_BYTES', 1 * 1024 * 1024)
    if file.size > max_file_size:
        return False
    with file.open() as fp:
        try:
            json.loads(fp.read().decode('utf-8'))
            return True
        except Exception:
            # A bare ``except:`` would also trap KeyboardInterrupt and
            # SystemExit; Exception keeps the best-effort contract safe.
            return False
def can_preview(file):
"""Determine if the given file can be previewed."""
return (file.is_local() and
file.has_extensions('.json') and
validate_json(file))
def preview(file):
"""Render appropiate template with embed flag."""
return render_template(
'invenio_previewer/json_prismjs.html',
file=file,
content=render(file),
js_bundles=['previewer_prism_js'],
css_bundles=['previewer_prism_css'],
)
|
inveniosoftware/invenio-previewer | invenio_previewer/extensions/json_prismjs.py | validate_json | python | def validate_json(file):
max_file_size = current_app.config.get(
'PREVIEWER_MAX_FILE_SIZE_BYTES', 1 * 1024 * 1024)
if file.size > max_file_size:
return False
with file.open() as fp:
try:
json.loads(fp.read().decode('utf-8'))
return True
except:
return False | Validate a JSON file. | train | https://github.com/inveniosoftware/invenio-previewer/blob/558fd22e0f29cc8cd7a6999abd4febcf6b248c49/invenio_previewer/extensions/json_prismjs.py#L32-L44 | null | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016-2019 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Previews a JSON file."""
from __future__ import absolute_import, print_function
import json
from collections import OrderedDict
from flask import current_app, render_template
from ..utils import detect_encoding
previewable_extensions = ['json']
def render(file):
    """Pretty print the JSON file for rendering."""
    with file.open() as fp:
        charset = detect_encoding(fp, default='utf-8')
        decoded = fp.read().decode(charset)
        # OrderedDict keeps the original key order in the output.
        parsed = json.loads(decoded, object_pairs_hook=OrderedDict)
        return json.dumps(parsed, indent=4, separators=(',', ': '))
def can_preview(file):
    """Determine if the given file can be previewed."""
    previewable = file.is_local()
    if previewable:
        previewable = file.has_extensions('.json')
    if previewable:
        previewable = validate_json(file)
    return previewable
def preview(file):
    """Render the appropriate template with the highlighted JSON content."""
    template_ctx = {
        'file': file,
        'content': render(file),
        'js_bundles': ['previewer_prism_js'],
        'css_bundles': ['previewer_prism_css'],
    }
    return render_template('invenio_previewer/json_prismjs.html', **template_ctx)
|
inveniosoftware/invenio-previewer | invenio_previewer/extensions/csv_dthreejs.py | validate_csv | python | def validate_csv(file):
try:
# Detect encoding and dialect
with file.open() as fp:
encoding = detect_encoding(fp, default='utf-8')
sample = fp.read(
current_app.config.get('PREVIEWER_CSV_VALIDATION_BYTES', 1024))
delimiter = csv.Sniffer().sniff(sample.decode(encoding)).delimiter
is_valid = True
except Exception as e:
current_app.logger.debug(
'File {0} is not valid CSV: {1}'.format(file.uri, e))
encoding = ''
delimiter = ''
is_valid = False
return {
'delimiter': delimiter,
'encoding': encoding,
'is_valid': is_valid
} | Return dialect information about given csv file. | train | https://github.com/inveniosoftware/invenio-previewer/blob/558fd22e0f29cc8cd7a6999abd4febcf6b248c49/invenio_previewer/extensions/csv_dthreejs.py#L23-L44 | [
"def detect_encoding(fp, default=None):\n \"\"\"Detect the cahracter encoding of a file.\n\n :param fp: Open Python file pointer.\n :param default: Fallback encoding to use.\n :returns: The detected encoding.\n\n .. note:: The file pointer is returned at its original read position.\n \"\"\"\n i... | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2019 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Render a CSV file using d3.js."""
from __future__ import absolute_import, print_function
import csv
from flask import current_app, render_template
from ..proxies import current_previewer
from ..utils import detect_encoding
previewable_extensions = ['csv', 'dsv']
def can_preview(file):
    """Determine if the given file can be previewed."""
    supported = file.is_local() and file.has_extensions('.csv', '.dsv')
    return validate_csv(file)['is_valid'] if supported else False
def preview(file):
    """Render the CSV previewer template with the detected dialect info."""
    dialect = validate_csv(file)
    return render_template(
        'invenio_previewer/csv_bar.html',
        file=file,
        delimiter=dialect['delimiter'],
        encoding=dialect['encoding'],
        js_bundles=current_previewer.js_bundles + ['previewer_csv_js'],
        css_bundles=current_previewer.css_bundles,
    )
|
inveniosoftware/invenio-previewer | invenio_previewer/extensions/csv_dthreejs.py | preview | python | def preview(file):
file_info = validate_csv(file)
return render_template(
'invenio_previewer/csv_bar.html',
file=file,
delimiter=file_info['delimiter'],
encoding=file_info['encoding'],
js_bundles=current_previewer.js_bundles + ['previewer_csv_js'],
css_bundles=current_previewer.css_bundles,
) | Render appropriate template with embed flag.
"def validate_csv(file):\n \"\"\"Return dialect information about given csv file.\"\"\"\n try:\n # Detect encoding and dialect\n with file.open() as fp:\n encoding = detect_encoding(fp, default='utf-8')\n sample = fp.read(\n current_app.config.get('PREVIEWER_... | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2019 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Render a CSV file using d3.js."""
from __future__ import absolute_import, print_function
import csv
from flask import current_app, render_template
from ..proxies import current_previewer
from ..utils import detect_encoding
previewable_extensions = ['csv', 'dsv']
def validate_csv(file):
    """Return dialect information about given csv file.

    The returned dict always has ``delimiter``, ``encoding`` and
    ``is_valid`` keys; on any failure the first two are empty strings
    and ``is_valid`` is ``False``.
    """
    try:
        with file.open() as fp:
            encoding = detect_encoding(fp, default='utf-8')
            sample_bytes = current_app.config.get(
                'PREVIEWER_CSV_VALIDATION_BYTES', 1024)
            sniffed = csv.Sniffer().sniff(
                fp.read(sample_bytes).decode(encoding))
        return {
            'delimiter': sniffed.delimiter,
            'encoding': encoding,
            'is_valid': True,
        }
    except Exception as exc:
        # Deliberately broad: any parsing/IO problem just means the file
        # is not previewable as CSV.
        current_app.logger.debug(
            'File {0} is not valid CSV: {1}'.format(file.uri, exc))
        return {
            'delimiter': '',
            'encoding': '',
            'is_valid': False,
        }
def can_preview(file):
"""Determine if the given file can be previewed."""
if file.is_local() and file.has_extensions('.csv', '.dsv'):
return validate_csv(file)['is_valid']
return False
|
inveniosoftware/invenio-previewer | invenio_previewer/extensions/zip.py | make_tree | python | def make_tree(file):
max_files_count = current_app.config.get('PREVIEWER_ZIP_MAX_FILES', 1000)
tree = {'type': 'folder', 'id': -1, 'children': {}}
try:
with file.open() as fp:
zf = zipfile.ZipFile(fp)
# Detect filenames encoding.
sample = ' '.join(zf.namelist()[:max_files_count])
if not isinstance(sample, binary_type):
sample = sample.encode('utf-16be')
encoding = chardet.detect(sample).get('encoding', 'utf-8')
for i, info in enumerate(zf.infolist()):
if i > max_files_count:
raise BufferError('Too many files inside the ZIP file.')
comps = info.filename.split(os.sep)
node = tree
for c in comps:
if not isinstance(c, text_type):
c = c.decode(encoding)
if c not in node['children']:
if c == '':
node['type'] = 'folder'
continue
node['children'][c] = {
'name': c,
'type': 'item',
'id': 'item{0}'.format(i),
'children': {}
}
node = node['children'][c]
node['size'] = info.file_size
except BufferError:
return tree, True, None
except (zipfile.LargeZipFile):
return tree, False, 'Zipfile is too large to be previewed.'
except Exception as e:
current_app.logger.warning(str(e), exc_info=True)
return tree, False, 'Zipfile is not previewable.'
return tree, False, None | Create tree structure from ZIP archive. | train | https://github.com/inveniosoftware/invenio-previewer/blob/558fd22e0f29cc8cd7a6999abd4febcf6b248c49/invenio_previewer/extensions/zip.py#L26-L67 | null | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2019 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Simple ZIP archive previewer."""
from __future__ import absolute_import, print_function
import os
import zipfile
import cchardet as chardet
from flask import current_app, render_template
from six import binary_type
from .._compat import text_type
from ..proxies import current_previewer
previewable_extensions = ['zip']
def children_to_list(node):
"""Organize children structure."""
if node['type'] == 'item' and len(node['children']) == 0:
del node['children']
else:
node['type'] = 'folder'
node['children'] = list(node['children'].values())
node['children'].sort(key=lambda x: x['name'])
node['children'] = map(children_to_list, node['children'])
return node
def can_preview(file):
    """Return True if filetype can be previewed."""
    previewable = file.is_local()
    if previewable:
        previewable = file.has_extensions('.zip')
    return previewable
def preview(file):
    """Return appropriate template and pass the file and an embed flag."""
    tree, limit_reached, error = make_tree(file)
    # Renamed from ``list`` to avoid shadowing the builtin.
    children = children_to_list(tree)['children']
    return render_template(
        "invenio_previewer/zip.html",
        file=file,
        tree=children,
        limit_reached=limit_reached,
        error=error,
        js_bundles=current_previewer.js_bundles + ['previewer_fullscreen_js'],
        css_bundles=current_previewer.css_bundles,
    )
|
inveniosoftware/invenio-previewer | invenio_previewer/extensions/zip.py | children_to_list | python | def children_to_list(node):
if node['type'] == 'item' and len(node['children']) == 0:
del node['children']
else:
node['type'] = 'folder'
node['children'] = list(node['children'].values())
node['children'].sort(key=lambda x: x['name'])
node['children'] = map(children_to_list, node['children'])
return node | Organize children structure. | train | https://github.com/inveniosoftware/invenio-previewer/blob/558fd22e0f29cc8cd7a6999abd4febcf6b248c49/invenio_previewer/extensions/zip.py#L70-L79 | null | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2019 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Simple ZIP archive previewer."""
from __future__ import absolute_import, print_function
import os
import zipfile
import cchardet as chardet
from flask import current_app, render_template
from six import binary_type
from .._compat import text_type
from ..proxies import current_previewer
previewable_extensions = ['zip']
def make_tree(file):
    """Create tree structure from ZIP archive.

    Builds a nested ``{'type', 'id', 'children', ...}`` dict mirroring
    the archive layout and returns a ``(tree, limit_reached, error)``
    triple: ``limit_reached`` is ``True`` when more than
    ``PREVIEWER_ZIP_MAX_FILES`` entries were found (the partial tree is
    still returned), and ``error`` is a human-readable message when the
    archive could not be read at all.
    """
    max_files_count = current_app.config.get('PREVIEWER_ZIP_MAX_FILES', 1000)
    tree = {'type': 'folder', 'id': -1, 'children': {}}
    try:
        with file.open() as fp:
            zf = zipfile.ZipFile(fp)
            # Detect filenames encoding. Member names are joined into one
            # sample; on Python 3 they are str, so encode to UTF-16BE to
            # give chardet bytes to analyse.
            sample = ' '.join(zf.namelist()[:max_files_count])
            if not isinstance(sample, binary_type):
                sample = sample.encode('utf-16be')
            encoding = chardet.detect(sample).get('encoding', 'utf-8')
            for i, info in enumerate(zf.infolist()):
                if i > max_files_count:
                    # Abort via BufferError so the handler below can flag
                    # a truncated-but-usable tree.
                    raise BufferError('Too many files inside the ZIP file.')
                # NOTE(review): ZIP member names use '/' separators per the
                # spec; splitting on ``os.sep`` would break on Windows —
                # confirm and consider splitting on '/' instead.
                comps = info.filename.split(os.sep)
                node = tree
                for c in comps:
                    if not isinstance(c, text_type):
                        c = c.decode(encoding)
                    if c not in node['children']:
                        if c == '':
                            # Trailing separator: this entry is a folder.
                            node['type'] = 'folder'
                            continue
                        node['children'][c] = {
                            'name': c,
                            'type': 'item',
                            'id': 'item{0}'.format(i),
                            'children': {}
                        }
                    node = node['children'][c]
                node['size'] = info.file_size
    except BufferError:
        return tree, True, None
    except (zipfile.LargeZipFile):
        return tree, False, 'Zipfile is too large to be previewed.'
    except Exception as e:
        current_app.logger.warning(str(e), exc_info=True)
        return tree, False, 'Zipfile is not previewable.'
    return tree, False, None
def can_preview(file):
"""Return True if filetype can be previewed."""
return file.is_local() and file.has_extensions('.zip')
def preview(file):
"""Return appropriate template and pass the file and an embed flag."""
tree, limit_reached, error = make_tree(file)
list = children_to_list(tree)['children']
return render_template(
"invenio_previewer/zip.html",
file=file,
tree=list,
limit_reached=limit_reached,
error=error,
js_bundles=current_previewer.js_bundles + ['previewer_fullscreen_js'],
css_bundles=current_previewer.css_bundles,
)
|
inveniosoftware/invenio-previewer | invenio_previewer/extensions/zip.py | preview | python | def preview(file):
tree, limit_reached, error = make_tree(file)
list = children_to_list(tree)['children']
return render_template(
"invenio_previewer/zip.html",
file=file,
tree=list,
limit_reached=limit_reached,
error=error,
js_bundles=current_previewer.js_bundles + ['previewer_fullscreen_js'],
css_bundles=current_previewer.css_bundles,
) | Return appropriate template and pass the file and an embed flag. | train | https://github.com/inveniosoftware/invenio-previewer/blob/558fd22e0f29cc8cd7a6999abd4febcf6b248c49/invenio_previewer/extensions/zip.py#L87-L99 | [
"def make_tree(file):\n \"\"\"Create tree structure from ZIP archive.\"\"\"\n max_files_count = current_app.config.get('PREVIEWER_ZIP_MAX_FILES', 1000)\n tree = {'type': 'folder', 'id': -1, 'children': {}}\n\n try:\n with file.open() as fp:\n zf = zipfile.ZipFile(fp)\n # Det... | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2019 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Simple ZIP archive previewer."""
from __future__ import absolute_import, print_function
import os
import zipfile
import cchardet as chardet
from flask import current_app, render_template
from six import binary_type
from .._compat import text_type
from ..proxies import current_previewer
previewable_extensions = ['zip']
def make_tree(file):
"""Create tree structure from ZIP archive."""
max_files_count = current_app.config.get('PREVIEWER_ZIP_MAX_FILES', 1000)
tree = {'type': 'folder', 'id': -1, 'children': {}}
try:
with file.open() as fp:
zf = zipfile.ZipFile(fp)
# Detect filenames encoding.
sample = ' '.join(zf.namelist()[:max_files_count])
if not isinstance(sample, binary_type):
sample = sample.encode('utf-16be')
encoding = chardet.detect(sample).get('encoding', 'utf-8')
for i, info in enumerate(zf.infolist()):
if i > max_files_count:
raise BufferError('Too many files inside the ZIP file.')
comps = info.filename.split(os.sep)
node = tree
for c in comps:
if not isinstance(c, text_type):
c = c.decode(encoding)
if c not in node['children']:
if c == '':
node['type'] = 'folder'
continue
node['children'][c] = {
'name': c,
'type': 'item',
'id': 'item{0}'.format(i),
'children': {}
}
node = node['children'][c]
node['size'] = info.file_size
except BufferError:
return tree, True, None
except (zipfile.LargeZipFile):
return tree, False, 'Zipfile is too large to be previewed.'
except Exception as e:
current_app.logger.warning(str(e), exc_info=True)
return tree, False, 'Zipfile is not previewable.'
return tree, False, None
def children_to_list(node):
    """Organize children structure.

    Recursively converts each ``children`` dict into a list sorted by
    name; leaf items get their empty ``children`` key removed.

    :param node: Tree node as built by ``make_tree``.
    :returns: The same node, mutated in place.
    """
    if node['type'] == 'item' and len(node['children']) == 0:
        del node['children']
    else:
        node['type'] = 'folder'
        # Use an eager list instead of a lazy ``map`` object: on Python 3
        # a map can only be iterated once, which breaks re-rendering.
        children = sorted(node['children'].values(), key=lambda x: x['name'])
        node['children'] = [children_to_list(child) for child in children]
    return node
def can_preview(file):
"""Return True if filetype can be previewed."""
return file.is_local() and file.has_extensions('.zip')
|
inveniosoftware/invenio-previewer | invenio_previewer/utils.py | detect_encoding | python | def detect_encoding(fp, default=None):
init_pos = fp.tell()
try:
sample = fp.read(
current_app.config.get('PREVIEWER_CHARDET_BYTES', 1024))
# Result contains 'confidence' and 'encoding'
result = cchardet.detect(sample)
threshold = current_app.config.get('PREVIEWER_CHARDET_CONFIDENCE', 0.9)
if result.get('confidence', 0) > threshold:
return result.get('encoding', default)
else:
return default
except Exception:
current_app.logger.warning('Encoding detection failed.', exc_info=True)
return default
finally:
fp.seek(init_pos) | Detect the character encoding of a file.
:param fp: Open Python file pointer.
:param default: Fallback encoding to use.
:returns: The detected encoding.
.. note:: The file pointer is returned at its original read position. | train | https://github.com/inveniosoftware/invenio-previewer/blob/558fd22e0f29cc8cd7a6999abd4febcf6b248c49/invenio_previewer/utils.py#L15-L39 | null | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016-2019 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Invenio Previewer Utilities."""
import cchardet
from flask import current_app
|
inveniosoftware/invenio-previewer | invenio_previewer/views.py | preview | python | def preview(pid, record, template=None, **kwargs):
# Get file from record
fileobj = current_previewer.record_file_factory(
pid, record, request.view_args.get(
'filename', request.args.get('filename', type=str))
)
if not fileobj:
abort(404)
# Try to see if specific previewer is requested?
try:
file_previewer = fileobj['previewer']
except KeyError:
file_previewer = None
# Find a suitable previewer
fileobj = PreviewFile(pid, record, fileobj)
for plugin in current_previewer.iter_previewers(
previewers=[file_previewer] if file_previewer else None):
if plugin.can_preview(fileobj):
try:
return plugin.preview(fileobj)
except Exception:
current_app.logger.warning(
('Preview failed for {key}, in {pid_type}:{pid_value}'
.format(key=fileobj.file.key,
pid_type=fileobj.pid.pid_type,
pid_value=fileobj.pid.pid_value)),
exc_info=True)
return default.preview(fileobj) | Preview file for given record.
Plug this method into your ``RECORDS_UI_ENDPOINTS`` configuration:
.. code-block:: python
RECORDS_UI_ENDPOINTS = dict(
recid=dict(
# ...
route='/records/<pid_value/preview/<path:filename>',
view_imp='invenio_previewer.views.preview',
record_class='invenio_records_files.api:Record',
)
) | train | https://github.com/inveniosoftware/invenio-previewer/blob/558fd22e0f29cc8cd7a6999abd4febcf6b248c49/invenio_previewer/views.py#L28-L72 | [
"def preview(file):\n \"\"\"Return appropriate template and passes the file and an embed flag.\"\"\"\n return render_template(\"invenio_previewer/default.html\", file=file)\n"
] | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016-2019 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""View method for Invenio-Records-UI for previewing files."""
from __future__ import absolute_import, print_function
from flask import Blueprint, abort, current_app, request
from .api import PreviewFile
from .extensions import default
from .proxies import current_previewer
blueprint = Blueprint(
'invenio_previewer',
__name__,
template_folder='templates',
static_folder='static',
)
"""Blueprint used to register template and static folders."""
@blueprint.app_template_test('previewable')
def is_previewable(extension):
"""Test if a file can be previewed checking its extension."""
return extension in current_previewer.previewable_extensions
|
inveniosoftware/invenio-previewer | examples/app.py | fixtures | python | def fixtures():
temp_path = os.path.join(os.path.dirname(__file__), 'temp')
demo_files_path = os.path.join(os.path.dirname(__file__), 'demo_files')
# Create location
loc = Location(name='local', uri=temp_path, default=True)
db.session.add(loc)
db.session.commit()
# Example files from the data folder
demo_files = (
'markdown.md',
'csvfile.csv',
'zipfile.zip',
'jsonfile.json',
'xmlfile.xml',
'notebook.ipynb',
'jpgfile.jpg',
'pngfile.png',
)
rec_uuid = uuid4()
provider = RecordIdProvider.create(object_type='rec', object_uuid=rec_uuid)
data = {
'pid_value': provider.pid.pid_value,
}
record = Record.create(data, id_=rec_uuid)
bucket = Bucket.create()
RecordsBuckets.create(record=record.model, bucket=bucket)
# Add files to the record
for f in demo_files:
with open(os.path.join(demo_files_path, f), 'rb') as fp:
record.files[f] = fp
record.files.flush()
record.commit()
db.session.commit() | Command for working with test data. | train | https://github.com/inveniosoftware/invenio-previewer/blob/558fd22e0f29cc8cd7a6999abd4febcf6b248c49/examples/app.py#L87-L126 | null | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016-2019 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
r"""Minimal Flask application example for development.
1. Setup the application and create demo data:
.. code-block:: console
$ cd examples
$ ./app-setup.py
2. Our record with pid 1 contains several files. You can check out the
different types of files by changing the filename in the url
to one of the following values: markdown.md, csvfile.csv, zipfile.zip,
jsonfile.json, xmlfile.xml, notebook.ipynb, jpgfile.jpg, pngfile.png
`http://localhost:5000/records/1/preview?filename=csvfile.csv`
"""
from __future__ import absolute_import, print_function
import os
from uuid import uuid4
from flask import Flask
from flask_babelex import Babel
from invenio_assets import InvenioAssets
from invenio_db import InvenioDB, db
from invenio_files_rest import InvenioFilesREST
from invenio_files_rest.models import Bucket, Location
from invenio_i18n import InvenioI18N
from invenio_pidstore.providers.recordid import RecordIdProvider
from invenio_records import InvenioRecords
from invenio_records_files.api import Record
from invenio_records_files.models import RecordsBuckets
from invenio_records_ui import InvenioRecordsUI
from invenio_records_ui.views import create_blueprint_from_app
from invenio_previewer import InvenioPreviewer
# Create Flask application
app = Flask(__name__)
app.config.update(
SECRET_KEY='CHANGEME',
SQLALCHEMY_DATABASE_URI=os.environ.get(
'SQLALCHEMY_DATABASE_URI', 'sqlite:///test.db'),
SQLALCHEMY_TRACK_MODIFICATIONS=True,
RECORDS_UI_DEFAULT_PERMISSION_FACTORY=None,
RECORDS_UI_ENDPOINTS=dict(
recid=dict(
pid_type='recid',
route='/records/<pid_value>',
template='invenio_records_ui/detail.html',
),
recid_files=dict(
pid_type='recid',
route='/records/<pid_value>/files/<filename>',
view_imp='invenio_records_files.utils:file_download_ui',
record_class='invenio_records_files.api:Record',
),
recid_previewer=dict(
pid_type='recid',
route='/records/<pid_value>/preview',
view_imp='invenio_previewer.views:preview',
record_class='invenio_records_files.api:Record',
)
)
)
Babel(app)
InvenioI18N(app)
InvenioDB(app)
InvenioAssets(app)
InvenioRecords(app)
InvenioFilesREST(app)
InvenioPreviewer(app)
InvenioRecordsUI(app)
app.register_blueprint(create_blueprint_from_app(app))
@app.cli.command()
|
inveniosoftware/invenio-previewer | invenio_previewer/extensions/mistune.py | render | python | def render(file):
with file.open() as fp:
encoding = detect_encoding(fp, default='utf-8')
result = mistune.markdown(fp.read().decode(encoding))
return result | Render HTML from Markdown file content. | train | https://github.com/inveniosoftware/invenio-previewer/blob/558fd22e0f29cc8cd7a6999abd4febcf6b248c49/invenio_previewer/extensions/mistune.py#L21-L26 | [
"def detect_encoding(fp, default=None):\n \"\"\"Detect the cahracter encoding of a file.\n\n :param fp: Open Python file pointer.\n :param default: Fallback encoding to use.\n :returns: The detected encoding.\n\n .. note:: The file pointer is returned at its original read position.\n \"\"\"\n i... | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2019 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Markdown rendering using mistune library."""
from __future__ import absolute_import, unicode_literals
import mistune
from flask import render_template
from ..utils import detect_encoding
previewable_extensions = ['md']
def can_preview(file):
"""Determine if file can be previewed."""
return file.is_local() and file.has_extensions('.md')
def preview(file):
"""Render Markdown."""
return render_template("invenio_previewer/mistune.html",
file=file,
content=render(file))
|
inveniosoftware/invenio-previewer | invenio_previewer/ext.py | _InvenioPreviewerState.record_file_factory | python | def record_file_factory(self):
try:
get_distribution('invenio-records-files')
from invenio_records_files.utils import record_file_factory
default = record_file_factory
except DistributionNotFound:
def default(pid, record, filename):
return None
return load_or_import_from_config(
'PREVIEWER_RECORD_FILE_FACOTRY',
app=self.app,
default=default,
) | Load default record file factory. | train | https://github.com/inveniosoftware/invenio-previewer/blob/558fd22e0f29cc8cd7a6999abd4febcf6b248c49/invenio_previewer/ext.py#L57-L71 | [
"def load_or_import_from_config(key, app=None, default=None):\n \"\"\"Load or import value from config.\"\"\"\n app = app or current_app\n imp = app.config.get(key)\n return obj_or_import_string(imp, default=default)\n"
] | class _InvenioPreviewerState(object):
"""State object."""
def __init__(self, app, entry_point_group=None):
"""Initialize state."""
self.app = app
self.entry_point_group = entry_point_group
self.previewers = {}
self._previewable_extensions = set()
@cached_property
def previewable_extensions(self):
if self.entry_point_group is not None:
self.load_entry_point_group(self.entry_point_group)
self.entry_point_group = None
return self._previewable_extensions
@cached_property
@property
def css_bundles(self):
return self.app.config['PREVIEWER_BASE_CSS_BUNDLES']
@property
def js_bundles(self):
return self.app.config['PREVIEWER_BASE_JS_BUNDLES']
def register_previewer(self, name, previewer):
"""Register a previewer in the system."""
if name in self.previewers:
assert name not in self.previewers, \
"Previewer with same name already registered"
self.previewers[name] = previewer
if hasattr(previewer, 'previewable_extensions'):
self._previewable_extensions |= set(
previewer.previewable_extensions)
def load_entry_point_group(self, entry_point_group):
"""Load previewers from an entry point group."""
for ep in pkg_resources.iter_entry_points(group=entry_point_group):
self.register_previewer(ep.name, ep.load())
def iter_previewers(self, previewers=None):
"""Get previewers ordered by PREVIEWER_PREVIEWERS_ORDER."""
if self.entry_point_group is not None:
self.load_entry_point_group(self.entry_point_group)
self.entry_point_group = None
previewers = previewers or \
self.app.config.get('PREVIEWER_PREFERENCE', [])
for item in previewers:
if item in self.previewers:
yield self.previewers[item]
|
inveniosoftware/invenio-previewer | invenio_previewer/ext.py | _InvenioPreviewerState.register_previewer | python | def register_previewer(self, name, previewer):
if name in self.previewers:
assert name not in self.previewers, \
"Previewer with same name already registered"
self.previewers[name] = previewer
if hasattr(previewer, 'previewable_extensions'):
self._previewable_extensions |= set(
previewer.previewable_extensions) | Register a previewer in the system. | train | https://github.com/inveniosoftware/invenio-previewer/blob/558fd22e0f29cc8cd7a6999abd4febcf6b248c49/invenio_previewer/ext.py#L81-L89 | null | class _InvenioPreviewerState(object):
"""State object."""
def __init__(self, app, entry_point_group=None):
"""Initialize state."""
self.app = app
self.entry_point_group = entry_point_group
self.previewers = {}
self._previewable_extensions = set()
@cached_property
def previewable_extensions(self):
if self.entry_point_group is not None:
self.load_entry_point_group(self.entry_point_group)
self.entry_point_group = None
return self._previewable_extensions
@cached_property
def record_file_factory(self):
"""Load default record file factory."""
try:
get_distribution('invenio-records-files')
from invenio_records_files.utils import record_file_factory
default = record_file_factory
except DistributionNotFound:
def default(pid, record, filename):
return None
return load_or_import_from_config(
'PREVIEWER_RECORD_FILE_FACOTRY',
app=self.app,
default=default,
)
@property
def css_bundles(self):
return self.app.config['PREVIEWER_BASE_CSS_BUNDLES']
@property
def js_bundles(self):
return self.app.config['PREVIEWER_BASE_JS_BUNDLES']
def load_entry_point_group(self, entry_point_group):
"""Load previewers from an entry point group."""
for ep in pkg_resources.iter_entry_points(group=entry_point_group):
self.register_previewer(ep.name, ep.load())
def iter_previewers(self, previewers=None):
"""Get previewers ordered by PREVIEWER_PREVIEWERS_ORDER."""
if self.entry_point_group is not None:
self.load_entry_point_group(self.entry_point_group)
self.entry_point_group = None
previewers = previewers or \
self.app.config.get('PREVIEWER_PREFERENCE', [])
for item in previewers:
if item in self.previewers:
yield self.previewers[item]
|
inveniosoftware/invenio-previewer | invenio_previewer/ext.py | _InvenioPreviewerState.iter_previewers | python | def iter_previewers(self, previewers=None):
if self.entry_point_group is not None:
self.load_entry_point_group(self.entry_point_group)
self.entry_point_group = None
previewers = previewers or \
self.app.config.get('PREVIEWER_PREFERENCE', [])
for item in previewers:
if item in self.previewers:
yield self.previewers[item] | Get previewers ordered by PREVIEWER_PREVIEWERS_ORDER. | train | https://github.com/inveniosoftware/invenio-previewer/blob/558fd22e0f29cc8cd7a6999abd4febcf6b248c49/invenio_previewer/ext.py#L96-L107 | [
"def load_entry_point_group(self, entry_point_group):\n \"\"\"Load previewers from an entry point group.\"\"\"\n for ep in pkg_resources.iter_entry_points(group=entry_point_group):\n self.register_previewer(ep.name, ep.load())\n"
] | class _InvenioPreviewerState(object):
"""State object."""
def __init__(self, app, entry_point_group=None):
"""Initialize state."""
self.app = app
self.entry_point_group = entry_point_group
self.previewers = {}
self._previewable_extensions = set()
@cached_property
def previewable_extensions(self):
if self.entry_point_group is not None:
self.load_entry_point_group(self.entry_point_group)
self.entry_point_group = None
return self._previewable_extensions
@cached_property
def record_file_factory(self):
"""Load default record file factory."""
try:
get_distribution('invenio-records-files')
from invenio_records_files.utils import record_file_factory
default = record_file_factory
except DistributionNotFound:
def default(pid, record, filename):
return None
return load_or_import_from_config(
'PREVIEWER_RECORD_FILE_FACOTRY',
app=self.app,
default=default,
)
@property
def css_bundles(self):
return self.app.config['PREVIEWER_BASE_CSS_BUNDLES']
@property
def js_bundles(self):
return self.app.config['PREVIEWER_BASE_JS_BUNDLES']
def register_previewer(self, name, previewer):
"""Register a previewer in the system."""
if name in self.previewers:
assert name not in self.previewers, \
"Previewer with same name already registered"
self.previewers[name] = previewer
if hasattr(previewer, 'previewable_extensions'):
self._previewable_extensions |= set(
previewer.previewable_extensions)
def load_entry_point_group(self, entry_point_group):
"""Load previewers from an entry point group."""
for ep in pkg_resources.iter_entry_points(group=entry_point_group):
self.register_previewer(ep.name, ep.load())
|
inveniosoftware/invenio-previewer | invenio_previewer/ext.py | InvenioPreviewer.init_app | python | def init_app(self, app, entry_point_group='invenio_previewer.previewers'):
self.init_config(app)
app.register_blueprint(blueprint)
state = _InvenioPreviewerState(
app,
entry_point_group=entry_point_group)
app.extensions['invenio-previewer'] = state
return state | Flask application initialization. | train | https://github.com/inveniosoftware/invenio-previewer/blob/558fd22e0f29cc8cd7a6999abd4febcf6b248c49/invenio_previewer/ext.py#L118-L126 | [
"def init_config(self, app):\n \"\"\"Initialize configuration.\"\"\"\n app.config.setdefault(\n 'PREVIEWER_BASE_TEMPLATE', 'invenio_previewer/base.html')\n\n for k in dir(config):\n if k.startswith('PREVIEWER_'):\n app.config.setdefault(k, getattr(config, k))\n"
] | class InvenioPreviewer(object):
"""Invenio-Previewer extension."""
def __init__(self, app, **kwargs):
"""Extension initialization."""
if app:
self._state = self.init_app(app, **kwargs)
def init_config(self, app):
"""Initialize configuration."""
app.config.setdefault(
'PREVIEWER_BASE_TEMPLATE', 'invenio_previewer/base.html')
for k in dir(config):
if k.startswith('PREVIEWER_'):
app.config.setdefault(k, getattr(config, k))
def __getattr__(self, name):
"""Proxy to state object."""
return getattr(self._state, name, None)
|
arokem/python-matlab-bridge | tools/github_stats.py | split_pulls | python | def split_pulls(all_issues, project="arokem/python-matlab-bridge"):
pulls = []
issues = []
for i in all_issues:
if is_pull_request(i):
pull = get_pull_request(project, i['number'], auth=True)
pulls.append(pull)
else:
issues.append(i)
return issues, pulls | split a list of closed issues into non-PR Issues and Pull Requests | train | https://github.com/arokem/python-matlab-bridge/blob/9822c7b55435662f4f033c5479cc03fea2255755/tools/github_stats.py#L53-L63 | [
"def get_pull_request(project, num, auth=False):\n \"\"\"get pull request info by number\n \"\"\"\n url = \"https://api.github.com/repos/{project}/pulls/{num}\".format(project=project, num=num)\n if auth:\n header = make_auth_header()\n else:\n header = None\n response = requests.ge... | #!/usr/bin/env python
"""Simple tools to query github.com and gather stats about issues.
Thanks to the IPython team for developing this!
python github_stats.py --milestone 2.0 --since-tag rel-1.0.0
"""
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import print_function
import codecs
import sys
from argparse import ArgumentParser
from datetime import datetime, timedelta
from subprocess import check_output
from gh_api import (
get_paged_request, make_auth_header, get_pull_request, is_pull_request,
get_milestone_id, get_issues_list, get_authors,
)
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
ISO8601 = "%Y-%m-%dT%H:%M:%SZ"
PER_PAGE = 100
#-----------------------------------------------------------------------------
# Functions
#-----------------------------------------------------------------------------
def round_hour(dt):
return dt.replace(minute=0,second=0,microsecond=0)
def _parse_datetime(s):
"""Parse dates in the format returned by the Github API."""
if s:
return datetime.strptime(s, ISO8601)
else:
return datetime.fromtimestamp(0)
def issues2dict(issues):
"""Convert a list of issues to a dict, keyed by issue number."""
idict = {}
for i in issues:
idict[i['number']] = i
return idict
def issues_closed_since(period=timedelta(days=365), project="arokem/python-matlab-bridge", pulls=False):
"""Get all issues closed since a particular point in time. period
can either be a datetime object, or a timedelta object. In the
latter case, it is used as a time before the present.
"""
which = 'pulls' if pulls else 'issues'
if isinstance(period, timedelta):
since = round_hour(datetime.utcnow() - period)
else:
since = period
url = "https://api.github.com/repos/%s/%s?state=closed&sort=updated&since=%s&per_page=%i" % (project, which, since.strftime(ISO8601), PER_PAGE)
allclosed = get_paged_request(url, headers=make_auth_header())
filtered = [ i for i in allclosed if _parse_datetime(i['closed_at']) > since ]
if pulls:
filtered = [ i for i in filtered if _parse_datetime(i['merged_at']) > since ]
# filter out PRs not against master (backports)
filtered = [ i for i in filtered if i['base']['ref'] == 'master' ]
else:
filtered = [ i for i in filtered if not is_pull_request(i) ]
return filtered
def sorted_by_field(issues, field='closed_at', reverse=False):
"""Return a list of issues sorted by closing date date."""
return sorted(issues, key = lambda i:i[field], reverse=reverse)
def report(issues, show_urls=False):
"""Summary report about a list of issues, printing number and title.
"""
# titles may have unicode in them, so we must encode everything below
if show_urls:
for i in issues:
print(u'#%d: %s' % (i['number'],
i['title'].replace(u'`', u'``')))
else:
for i in issues:
print(u'* %d: %s' % (i['number'], i['title'].replace(u'`', u'``')))
#-----------------------------------------------------------------------------
# Main script
#-----------------------------------------------------------------------------
if __name__ == "__main__":
# deal with unicode
sys.stdout = codecs.getwriter('utf8')(sys.stdout)
# Whether to add reST urls for all issues in printout.
show_urls = True
parser = ArgumentParser()
parser.add_argument('--since-tag', type=str,
help="The git tag to use for the starting point (typically the last major release)."
)
parser.add_argument('--milestone', type=str,
help="The GitHub milestone to use for filtering issues [optional]."
)
parser.add_argument('--days', type=int,
help="The number of days of data to summarize (use this or --since-tag)."
)
parser.add_argument('--project', type=str, default="arokem/python-matlab-bridge",
help="The project to summarize."
)
opts = parser.parse_args()
tag = opts.since_tag
# set `since` from days or git tag
if opts.days:
since = datetime.utcnow() - timedelta(days=opts.days)
else:
if not tag:
tag = check_output(['git', 'describe', '--abbrev=0']).strip()
cmd = ['git', 'log', '-1', '--format=%ai', tag]
tagday, tz = check_output(cmd).strip().rsplit(' ', 1)
since = datetime.strptime(tagday, "%Y-%m-%d %H:%M:%S")
h = int(tz[1:3])
m = int(tz[3:])
td = timedelta(hours=h, minutes=m)
if tz[0] == '-':
since += td
else:
since -= td
since = round_hour(since)
milestone = opts.milestone
project = opts.project
print("fetching GitHub stats since %s (tag: %s, milestone: %s)" % (since, tag, milestone), file=sys.stderr)
if milestone:
milestone_id = get_milestone_id(project=project, milestone=milestone,
auth=True)
issues_and_pulls = get_issues_list(project=project,
milestone=milestone_id,
state='closed',
auth=True,
)
issues, pulls = split_pulls(issues_and_pulls)
else:
issues = issues_closed_since(since, project=project, pulls=False)
pulls = issues_closed_since(since, project=project, pulls=True)
# For regular reports, it's nice to show them in reverse chronological order
issues = sorted_by_field(issues, reverse=True)
pulls = sorted_by_field(pulls, reverse=True)
n_issues, n_pulls = map(len, (issues, pulls))
n_total = n_issues + n_pulls
# Print summary report we can directly include into release notes.
print()
since_day = since.strftime("%Y/%m/%d")
today = datetime.today().strftime("%Y/%m/%d")
print("GitHub stats for %s - %s (tag: %s)" % (since_day, today, tag))
print()
print("These lists are automatically generated, and may be incomplete or contain duplicates.")
print()
ncommits = 0
all_authors = []
if tag:
# print git info, in addition to GitHub info:
since_tag = tag+'..'
cmd = ['git', 'log', '--oneline', since_tag]
ncommits += len(check_output(cmd).splitlines())
author_cmd = ['git', 'log', '--use-mailmap', "--format=* %aN", since_tag]
all_authors.extend(check_output(author_cmd).decode('utf-8', 'replace').splitlines())
pr_authors = []
for pr in pulls:
pr_authors.extend(get_authors(pr))
ncommits = len(pr_authors) + ncommits - len(pulls)
author_cmd = ['git', 'check-mailmap'] + pr_authors
with_email = check_output(author_cmd).decode('utf-8', 'replace').splitlines()
all_authors.extend([ u'* ' + a.split(' <')[0] for a in with_email ])
unique_authors = sorted(set(all_authors), key=lambda s: s.lower())
print("The following %i authors contributed %i commits." % (len(unique_authors), ncommits))
print()
print('\n'.join(unique_authors))
print()
print("We closed %d issues and merged %d pull requests;\n"
"this is the full list (generated with the script \n"
":file:`tools/github_stats.py`):" % (n_pulls, n_issues))
print()
print('Pull Requests (%d):\n' % n_pulls)
report(pulls, show_urls)
print()
print('Issues (%d):\n' % n_issues)
report(issues, show_urls)
|
arokem/python-matlab-bridge | tools/github_stats.py | issues_closed_since | python | def issues_closed_since(period=timedelta(days=365), project="arokem/python-matlab-bridge", pulls=False):
which = 'pulls' if pulls else 'issues'
if isinstance(period, timedelta):
since = round_hour(datetime.utcnow() - period)
else:
since = period
url = "https://api.github.com/repos/%s/%s?state=closed&sort=updated&since=%s&per_page=%i" % (project, which, since.strftime(ISO8601), PER_PAGE)
allclosed = get_paged_request(url, headers=make_auth_header())
filtered = [ i for i in allclosed if _parse_datetime(i['closed_at']) > since ]
if pulls:
filtered = [ i for i in filtered if _parse_datetime(i['merged_at']) > since ]
# filter out PRs not against master (backports)
filtered = [ i for i in filtered if i['base']['ref'] == 'master' ]
else:
filtered = [ i for i in filtered if not is_pull_request(i) ]
return filtered | Get all issues closed since a particular point in time. period
can either be a datetime object, or a timedelta object. In the
latter case, it is used as a time before the present. | train | https://github.com/arokem/python-matlab-bridge/blob/9822c7b55435662f4f033c5479cc03fea2255755/tools/github_stats.py#L66-L89 | [
"def make_auth_header():\n return {'Authorization': 'token ' + get_auth_token()}\n",
"def get_paged_request(url, headers=None, **params):\n \"\"\"get a full list, handling APIv3's paging\"\"\"\n results = []\n params.setdefault(\"per_page\", 100)\n while True:\n if '?' in url:\n p... | #!/usr/bin/env python
"""Simple tools to query github.com and gather stats about issues.
Thanks to the IPython team for developing this!
python github_stats.py --milestone 2.0 --since-tag rel-1.0.0
"""
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import print_function
import codecs
import sys
from argparse import ArgumentParser
from datetime import datetime, timedelta
from subprocess import check_output
from gh_api import (
get_paged_request, make_auth_header, get_pull_request, is_pull_request,
get_milestone_id, get_issues_list, get_authors,
)
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
ISO8601 = "%Y-%m-%dT%H:%M:%SZ"
PER_PAGE = 100
#-----------------------------------------------------------------------------
# Functions
#-----------------------------------------------------------------------------
def round_hour(dt):
return dt.replace(minute=0,second=0,microsecond=0)
def _parse_datetime(s):
"""Parse dates in the format returned by the Github API."""
if s:
return datetime.strptime(s, ISO8601)
else:
return datetime.fromtimestamp(0)
def issues2dict(issues):
"""Convert a list of issues to a dict, keyed by issue number."""
idict = {}
for i in issues:
idict[i['number']] = i
return idict
def split_pulls(all_issues, project="arokem/python-matlab-bridge"):
"""split a list of closed issues into non-PR Issues and Pull Requests"""
pulls = []
issues = []
for i in all_issues:
if is_pull_request(i):
pull = get_pull_request(project, i['number'], auth=True)
pulls.append(pull)
else:
issues.append(i)
return issues, pulls
def issues_closed_since(period=timedelta(days=365), project="arokem/python-matlab-bridge", pulls=False):
"""Get all issues closed since a particular point in time. period
can either be a datetime object, or a timedelta object. In the
latter case, it is used as a time before the present.
"""
which = 'pulls' if pulls else 'issues'
if isinstance(period, timedelta):
since = round_hour(datetime.utcnow() - period)
else:
since = period
url = "https://api.github.com/repos/%s/%s?state=closed&sort=updated&since=%s&per_page=%i" % (project, which, since.strftime(ISO8601), PER_PAGE)
allclosed = get_paged_request(url, headers=make_auth_header())
filtered = [ i for i in allclosed if _parse_datetime(i['closed_at']) > since ]
if pulls:
filtered = [ i for i in filtered if _parse_datetime(i['merged_at']) > since ]
# filter out PRs not against master (backports)
filtered = [ i for i in filtered if i['base']['ref'] == 'master' ]
else:
filtered = [ i for i in filtered if not is_pull_request(i) ]
return filtered
def sorted_by_field(issues, field='closed_at', reverse=False):
"""Return a list of issues sorted by closing date date."""
return sorted(issues, key = lambda i:i[field], reverse=reverse)
def report(issues, show_urls=False):
"""Summary report about a list of issues, printing number and title.
"""
# titles may have unicode in them, so we must encode everything below
if show_urls:
for i in issues:
print(u'#%d: %s' % (i['number'],
i['title'].replace(u'`', u'``')))
else:
for i in issues:
print(u'* %d: %s' % (i['number'], i['title'].replace(u'`', u'``')))
#-----------------------------------------------------------------------------
# Main script
#-----------------------------------------------------------------------------
if __name__ == "__main__":
# deal with unicode
sys.stdout = codecs.getwriter('utf8')(sys.stdout)
# Whether to add reST urls for all issues in printout.
show_urls = True
parser = ArgumentParser()
parser.add_argument('--since-tag', type=str,
help="The git tag to use for the starting point (typically the last major release)."
)
parser.add_argument('--milestone', type=str,
help="The GitHub milestone to use for filtering issues [optional]."
)
parser.add_argument('--days', type=int,
help="The number of days of data to summarize (use this or --since-tag)."
)
parser.add_argument('--project', type=str, default="arokem/python-matlab-bridge",
help="The project to summarize."
)
opts = parser.parse_args()
tag = opts.since_tag
# set `since` from days or git tag
if opts.days:
since = datetime.utcnow() - timedelta(days=opts.days)
else:
if not tag:
tag = check_output(['git', 'describe', '--abbrev=0']).strip()
cmd = ['git', 'log', '-1', '--format=%ai', tag]
tagday, tz = check_output(cmd).strip().rsplit(' ', 1)
since = datetime.strptime(tagday, "%Y-%m-%d %H:%M:%S")
h = int(tz[1:3])
m = int(tz[3:])
td = timedelta(hours=h, minutes=m)
if tz[0] == '-':
since += td
else:
since -= td
since = round_hour(since)
milestone = opts.milestone
project = opts.project
print("fetching GitHub stats since %s (tag: %s, milestone: %s)" % (since, tag, milestone), file=sys.stderr)
if milestone:
milestone_id = get_milestone_id(project=project, milestone=milestone,
auth=True)
issues_and_pulls = get_issues_list(project=project,
milestone=milestone_id,
state='closed',
auth=True,
)
issues, pulls = split_pulls(issues_and_pulls)
else:
issues = issues_closed_since(since, project=project, pulls=False)
pulls = issues_closed_since(since, project=project, pulls=True)
# For regular reports, it's nice to show them in reverse chronological order
issues = sorted_by_field(issues, reverse=True)
pulls = sorted_by_field(pulls, reverse=True)
n_issues, n_pulls = map(len, (issues, pulls))
n_total = n_issues + n_pulls
# Print summary report we can directly include into release notes.
print()
since_day = since.strftime("%Y/%m/%d")
today = datetime.today().strftime("%Y/%m/%d")
print("GitHub stats for %s - %s (tag: %s)" % (since_day, today, tag))
print()
print("These lists are automatically generated, and may be incomplete or contain duplicates.")
print()
ncommits = 0
all_authors = []
if tag:
# print git info, in addition to GitHub info:
since_tag = tag+'..'
cmd = ['git', 'log', '--oneline', since_tag]
ncommits += len(check_output(cmd).splitlines())
author_cmd = ['git', 'log', '--use-mailmap', "--format=* %aN", since_tag]
all_authors.extend(check_output(author_cmd).decode('utf-8', 'replace').splitlines())
pr_authors = []
for pr in pulls:
pr_authors.extend(get_authors(pr))
ncommits = len(pr_authors) + ncommits - len(pulls)
author_cmd = ['git', 'check-mailmap'] + pr_authors
with_email = check_output(author_cmd).decode('utf-8', 'replace').splitlines()
all_authors.extend([ u'* ' + a.split(' <')[0] for a in with_email ])
unique_authors = sorted(set(all_authors), key=lambda s: s.lower())
print("The following %i authors contributed %i commits." % (len(unique_authors), ncommits))
print()
print('\n'.join(unique_authors))
print()
print("We closed %d issues and merged %d pull requests;\n"
"this is the full list (generated with the script \n"
":file:`tools/github_stats.py`):" % (n_pulls, n_issues))
print()
print('Pull Requests (%d):\n' % n_pulls)
report(pulls, show_urls)
print()
print('Issues (%d):\n' % n_issues)
report(issues, show_urls)
|
arokem/python-matlab-bridge | tools/github_stats.py | sorted_by_field | python | def sorted_by_field(issues, field='closed_at', reverse=False):
return sorted(issues, key = lambda i:i[field], reverse=reverse) | Return a list of issues sorted by closing date date. | train | https://github.com/arokem/python-matlab-bridge/blob/9822c7b55435662f4f033c5479cc03fea2255755/tools/github_stats.py#L92-L94 | null | #!/usr/bin/env python
"""Simple tools to query github.com and gather stats about issues.
Thanks to the IPython team for developing this!
python github_stats.py --milestone 2.0 --since-tag rel-1.0.0
"""
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import print_function
import codecs
import sys
from argparse import ArgumentParser
from datetime import datetime, timedelta
from subprocess import check_output
from gh_api import (
get_paged_request, make_auth_header, get_pull_request, is_pull_request,
get_milestone_id, get_issues_list, get_authors,
)
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
ISO8601 = "%Y-%m-%dT%H:%M:%SZ"
PER_PAGE = 100
#-----------------------------------------------------------------------------
# Functions
#-----------------------------------------------------------------------------
def round_hour(dt):
return dt.replace(minute=0,second=0,microsecond=0)
def _parse_datetime(s):
"""Parse dates in the format returned by the Github API."""
if s:
return datetime.strptime(s, ISO8601)
else:
return datetime.fromtimestamp(0)
def issues2dict(issues):
    """Convert a list of issues to a dict, keyed by issue number."""
    return {issue['number']: issue for issue in issues}
def split_pulls(all_issues, project="arokem/python-matlab-bridge"):
    """Split a list of closed issues into (non-PR issues, pull requests).

    Pull requests are re-fetched through the PR endpoint so that
    PR-specific fields (merged_at, base, ...) are populated.
    """
    issues, pulls = [], []
    for item in all_issues:
        if is_pull_request(item):
            pulls.append(get_pull_request(project, item['number'], auth=True))
        else:
            issues.append(item)
    return issues, pulls
def issues_closed_since(period=timedelta(days=365), project="arokem/python-matlab-bridge", pulls=False):
    """Get all issues closed since a particular point in time.

    `period` can either be a datetime object (the cutoff itself) or a
    timedelta object (a span of time before the present).
    """
    if isinstance(period, timedelta):
        since = round_hour(datetime.utcnow() - period)
    else:
        since = period
    which = 'pulls' if pulls else 'issues'
    url = ("https://api.github.com/repos/%s/%s?state=closed&sort=updated&since=%s&per_page=%i"
           % (project, which, since.strftime(ISO8601), PER_PAGE))
    allclosed = get_paged_request(url, headers=make_auth_header())
    recent = [item for item in allclosed
              if _parse_datetime(item['closed_at']) > since]
    if pulls:
        # only PRs actually merged inside the window...
        recent = [pr for pr in recent if _parse_datetime(pr['merged_at']) > since]
        # ...and merged into master (drop backport branches)
        recent = [pr for pr in recent if pr['base']['ref'] == 'master']
    else:
        recent = [item for item in recent if not is_pull_request(item)]
    return recent
def report(issues, show_urls=False):
    """Summary report about a list of issues, printing number and title."""
    # titles may contain backticks; double them so reST renders literally
    template = u'#%d: %s' if show_urls else u'* %d: %s'
    for issue in issues:
        print(template % (issue['number'], issue['title'].replace(u'`', u'``')))
#-----------------------------------------------------------------------------
# Main script
#-----------------------------------------------------------------------------
if __name__ == "__main__":
    # NOTE(review): this script predates Python 3 -- the codecs wrapper and
    # the bytes returned by check_output assume Python 2 str semantics.
    # deal with unicode
    sys.stdout = codecs.getwriter('utf8')(sys.stdout)

    # Whether to add reST urls for all issues in printout.
    show_urls = True

    parser = ArgumentParser()
    parser.add_argument('--since-tag', type=str,
                        help="The git tag to use for the starting point (typically the last major release)."
                        )
    parser.add_argument('--milestone', type=str,
                        help="The GitHub milestone to use for filtering issues [optional]."
                        )
    parser.add_argument('--days', type=int,
                        help="The number of days of data to summarize (use this or --since-tag)."
                        )
    parser.add_argument('--project', type=str, default="arokem/python-matlab-bridge",
                        help="The project to summarize."
                        )
    opts = parser.parse_args()
    tag = opts.since_tag

    # set `since` from days or git tag
    if opts.days:
        since = datetime.utcnow() - timedelta(days=opts.days)
    else:
        if not tag:
            tag = check_output(['git', 'describe', '--abbrev=0']).strip()
        cmd = ['git', 'log', '-1', '--format=%ai', tag]
        tagday, tz = check_output(cmd).strip().rsplit(' ', 1)
        since = datetime.strptime(tagday, "%Y-%m-%d %H:%M:%S")
        # convert the tag's local timestamp to UTC using its utc-offset suffix
        h = int(tz[1:3])
        m = int(tz[3:])
        td = timedelta(hours=h, minutes=m)
        if tz[0] == '-':
            since += td
        else:
            since -= td

    since = round_hour(since)
    milestone = opts.milestone
    project = opts.project

    print("fetching GitHub stats since %s (tag: %s, milestone: %s)" % (since, tag, milestone), file=sys.stderr)
    if milestone:
        milestone_id = get_milestone_id(project=project, milestone=milestone,
                                        auth=True)
        issues_and_pulls = get_issues_list(project=project,
                                           milestone=milestone_id,
                                           state='closed',
                                           auth=True,
                                           )
        issues, pulls = split_pulls(issues_and_pulls)
    else:
        issues = issues_closed_since(since, project=project, pulls=False)
        pulls = issues_closed_since(since, project=project, pulls=True)

    # For regular reports, it's nice to show them in reverse chronological order
    issues = sorted_by_field(issues, reverse=True)
    pulls = sorted_by_field(pulls, reverse=True)

    n_issues, n_pulls = map(len, (issues, pulls))
    n_total = n_issues + n_pulls

    # Print summary report we can directly include into release notes.
    print()
    since_day = since.strftime("%Y/%m/%d")
    today = datetime.today().strftime("%Y/%m/%d")
    print("GitHub stats for %s - %s (tag: %s)" % (since_day, today, tag))
    print()
    print("These lists are automatically generated, and may be incomplete or contain duplicates.")
    print()

    ncommits = 0
    all_authors = []
    if tag:
        # print git info, in addition to GitHub info:
        since_tag = tag+'..'
        cmd = ['git', 'log', '--oneline', since_tag]
        ncommits += len(check_output(cmd).splitlines())
        author_cmd = ['git', 'log', '--use-mailmap', "--format=* %aN", since_tag]
        all_authors.extend(check_output(author_cmd).decode('utf-8', 'replace').splitlines())

    pr_authors = []
    for pr in pulls:
        pr_authors.extend(get_authors(pr))
    # each merged PR contributes one merge commit; don't double-count it
    ncommits = len(pr_authors) + ncommits - len(pulls)
    author_cmd = ['git', 'check-mailmap'] + pr_authors
    with_email = check_output(author_cmd).decode('utf-8', 'replace').splitlines()
    all_authors.extend([ u'* ' + a.split(' <')[0] for a in with_email ])
    unique_authors = sorted(set(all_authors), key=lambda s: s.lower())

    print("The following %i authors contributed %i commits." % (len(unique_authors), ncommits))
    print()
    print('\n'.join(unique_authors))
    print()

    # BUG FIX: the issue and PR counts were swapped here -- the original
    # interpolated (n_pulls, n_issues) into "closed %d issues ... merged %d
    # pull requests".
    print("We closed %d issues and merged %d pull requests;\n"
          "this is the full list (generated with the script \n"
          ":file:`tools/github_stats.py`):" % (n_issues, n_pulls))
    print()
    print('Pull Requests (%d):\n' % n_pulls)
    report(pulls, show_urls)
    print()
    print('Issues (%d):\n' % n_issues)
    report(issues, show_urls)
|
arokem/python-matlab-bridge | tools/github_stats.py | report | python | def report(issues, show_urls=False):
# titles may have unicode in them, so we must encode everything below
if show_urls:
for i in issues:
print(u'#%d: %s' % (i['number'],
i['title'].replace(u'`', u'``')))
else:
for i in issues:
print(u'* %d: %s' % (i['number'], i['title'].replace(u'`', u'``'))) | Summary report about a list of issues, printing number and title. | train | https://github.com/arokem/python-matlab-bridge/blob/9822c7b55435662f4f033c5479cc03fea2255755/tools/github_stats.py#L97-L107 | null | #!/usr/bin/env python
"""Simple tools to query github.com and gather stats about issues.
Thanks to the IPython team for developing this!
python github_stats.py --milestone 2.0 --since-tag rel-1.0.0
"""
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import print_function
import codecs
import sys
from argparse import ArgumentParser
from datetime import datetime, timedelta
from subprocess import check_output
from gh_api import (
get_paged_request, make_auth_header, get_pull_request, is_pull_request,
get_milestone_id, get_issues_list, get_authors,
)
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
ISO8601 = "%Y-%m-%dT%H:%M:%SZ"
PER_PAGE = 100
#-----------------------------------------------------------------------------
# Functions
#-----------------------------------------------------------------------------
def round_hour(dt):
return dt.replace(minute=0,second=0,microsecond=0)
def _parse_datetime(s):
"""Parse dates in the format returned by the Github API."""
if s:
return datetime.strptime(s, ISO8601)
else:
return datetime.fromtimestamp(0)
def issues2dict(issues):
"""Convert a list of issues to a dict, keyed by issue number."""
idict = {}
for i in issues:
idict[i['number']] = i
return idict
def split_pulls(all_issues, project="arokem/python-matlab-bridge"):
"""split a list of closed issues into non-PR Issues and Pull Requests"""
pulls = []
issues = []
for i in all_issues:
if is_pull_request(i):
pull = get_pull_request(project, i['number'], auth=True)
pulls.append(pull)
else:
issues.append(i)
return issues, pulls
def issues_closed_since(period=timedelta(days=365), project="arokem/python-matlab-bridge", pulls=False):
"""Get all issues closed since a particular point in time. period
can either be a datetime object, or a timedelta object. In the
latter case, it is used as a time before the present.
"""
which = 'pulls' if pulls else 'issues'
if isinstance(period, timedelta):
since = round_hour(datetime.utcnow() - period)
else:
since = period
url = "https://api.github.com/repos/%s/%s?state=closed&sort=updated&since=%s&per_page=%i" % (project, which, since.strftime(ISO8601), PER_PAGE)
allclosed = get_paged_request(url, headers=make_auth_header())
filtered = [ i for i in allclosed if _parse_datetime(i['closed_at']) > since ]
if pulls:
filtered = [ i for i in filtered if _parse_datetime(i['merged_at']) > since ]
# filter out PRs not against master (backports)
filtered = [ i for i in filtered if i['base']['ref'] == 'master' ]
else:
filtered = [ i for i in filtered if not is_pull_request(i) ]
return filtered
def sorted_by_field(issues, field='closed_at', reverse=False):
    """Return *issues* sorted by *field* (closing date by default)."""
    return sorted(issues, key=lambda issue: issue[field], reverse=reverse)
#-----------------------------------------------------------------------------
# Main script
#-----------------------------------------------------------------------------
if __name__ == "__main__":
# deal with unicode
sys.stdout = codecs.getwriter('utf8')(sys.stdout)
# Whether to add reST urls for all issues in printout.
show_urls = True
parser = ArgumentParser()
parser.add_argument('--since-tag', type=str,
help="The git tag to use for the starting point (typically the last major release)."
)
parser.add_argument('--milestone', type=str,
help="The GitHub milestone to use for filtering issues [optional]."
)
parser.add_argument('--days', type=int,
help="The number of days of data to summarize (use this or --since-tag)."
)
parser.add_argument('--project', type=str, default="arokem/python-matlab-bridge",
help="The project to summarize."
)
opts = parser.parse_args()
tag = opts.since_tag
# set `since` from days or git tag
if opts.days:
since = datetime.utcnow() - timedelta(days=opts.days)
else:
if not tag:
tag = check_output(['git', 'describe', '--abbrev=0']).strip()
cmd = ['git', 'log', '-1', '--format=%ai', tag]
tagday, tz = check_output(cmd).strip().rsplit(' ', 1)
since = datetime.strptime(tagday, "%Y-%m-%d %H:%M:%S")
h = int(tz[1:3])
m = int(tz[3:])
td = timedelta(hours=h, minutes=m)
if tz[0] == '-':
since += td
else:
since -= td
since = round_hour(since)
milestone = opts.milestone
project = opts.project
print("fetching GitHub stats since %s (tag: %s, milestone: %s)" % (since, tag, milestone), file=sys.stderr)
if milestone:
milestone_id = get_milestone_id(project=project, milestone=milestone,
auth=True)
issues_and_pulls = get_issues_list(project=project,
milestone=milestone_id,
state='closed',
auth=True,
)
issues, pulls = split_pulls(issues_and_pulls)
else:
issues = issues_closed_since(since, project=project, pulls=False)
pulls = issues_closed_since(since, project=project, pulls=True)
# For regular reports, it's nice to show them in reverse chronological order
issues = sorted_by_field(issues, reverse=True)
pulls = sorted_by_field(pulls, reverse=True)
n_issues, n_pulls = map(len, (issues, pulls))
n_total = n_issues + n_pulls
# Print summary report we can directly include into release notes.
print()
since_day = since.strftime("%Y/%m/%d")
today = datetime.today().strftime("%Y/%m/%d")
print("GitHub stats for %s - %s (tag: %s)" % (since_day, today, tag))
print()
print("These lists are automatically generated, and may be incomplete or contain duplicates.")
print()
ncommits = 0
all_authors = []
if tag:
# print git info, in addition to GitHub info:
since_tag = tag+'..'
cmd = ['git', 'log', '--oneline', since_tag]
ncommits += len(check_output(cmd).splitlines())
author_cmd = ['git', 'log', '--use-mailmap', "--format=* %aN", since_tag]
all_authors.extend(check_output(author_cmd).decode('utf-8', 'replace').splitlines())
pr_authors = []
for pr in pulls:
pr_authors.extend(get_authors(pr))
ncommits = len(pr_authors) + ncommits - len(pulls)
author_cmd = ['git', 'check-mailmap'] + pr_authors
with_email = check_output(author_cmd).decode('utf-8', 'replace').splitlines()
all_authors.extend([ u'* ' + a.split(' <')[0] for a in with_email ])
unique_authors = sorted(set(all_authors), key=lambda s: s.lower())
print("The following %i authors contributed %i commits." % (len(unique_authors), ncommits))
print()
print('\n'.join(unique_authors))
print()
print("We closed %d issues and merged %d pull requests;\n"
"this is the full list (generated with the script \n"
":file:`tools/github_stats.py`):" % (n_pulls, n_issues))
print()
print('Pull Requests (%d):\n' % n_pulls)
report(pulls, show_urls)
print()
print('Issues (%d):\n' % n_issues)
report(issues, show_urls)
|
arokem/python-matlab-bridge | pymatbridge/pymatbridge.py | encode_ndarray | python | def encode_ndarray(obj):
shape = obj.shape
if len(shape) == 1:
shape = (1, obj.shape[0])
if obj.flags.c_contiguous:
obj = obj.T
elif not obj.flags.f_contiguous:
obj = asfortranarray(obj.T)
else:
obj = obj.T
try:
data = obj.astype(float64).tobytes()
except AttributeError:
data = obj.astype(float64).tostring()
data = base64.b64encode(data).decode('utf-8')
return data, shape | Write a numpy array and its shape to base64 buffers | train | https://github.com/arokem/python-matlab-bridge/blob/9822c7b55435662f4f033c5479cc03fea2255755/pymatbridge/pymatbridge.py#L46-L63 | null | """
pymatbridge
===========
This is a module for communicating and running Matlab from within python
Example
-------
>>> import pymatbridge
>>> m = pymatbridge.Matlab()
>>> m.start()
Starting MATLAB on ZMQ socket ipc:///tmp/pymatbridge
Send 'exit' command to kill the server
.MATLAB started and connected!
True
>>> m.run_code('a=1;')
{'content': {'stdout': '', 'datadir': '/private/tmp/MatlabData/', 'code': 'a=1;', 'figures': []}, 'success': True}
>>> m.get_variable('a')
1
"""
import atexit
import os
import time
import base64
import zmq
import subprocess
import sys
import json
import types
import weakref
import random
from uuid import uuid4
from numpy import ndarray, generic, float64, frombuffer, asfortranarray
try:
from scipy.sparse import spmatrix
except ImportError:
class spmatrix:
pass
# JSON encoder extension to handle complex numbers and numpy arrays
class PymatEncoder(json.JSONEncoder):
    """JSON encoder that understands numpy arrays/scalars and complex numbers."""

    def default(self, obj):
        if isinstance(obj, ndarray):
            kind = obj.dtype.kind
            if kind in 'uif':
                # real-valued array -> base64 buffer + MATLAB-style shape
                data, shape = encode_ndarray(obj)
                return {'ndarray': True, 'shape': shape, 'data': data}
            if kind == 'c':
                # complex array -> separate real/imag buffers
                real, shape = encode_ndarray(obj.real.copy())
                imag, _ = encode_ndarray(obj.imag.copy())
                return {'ndarray': True, 'shape': shape,
                        'real': real, 'imag': imag}
            # any other dtype (bool, object, str, ...) -> nested lists
            return obj.tolist()
        if isinstance(obj, complex):
            return {'real': obj.real, 'imag': obj.imag}
        if isinstance(obj, generic):
            # numpy scalar -> the equivalent native Python scalar
            return obj.item()
        # Handle the default case
        return json.JSONEncoder.default(self, obj)
def decode_arr(data):
    """Extract a float64 numpy array from a base64-encoded buffer."""
    raw = base64.b64decode(data.encode('utf-8'))
    return frombuffer(raw, float64)
# JSON decoder for arrays and complex numbers
def decode_pymat(dct):
    """JSON object hook: rebuild numpy arrays and complex scalars."""
    if 'ndarray' in dct and 'data' in dct:
        # real-valued array: data buffer plus a shape (possibly itself encoded)
        values = decode_arr(dct['data'])
        shape = dct['shape']
        if type(dct['shape']) is not list:
            shape = decode_arr(dct['shape']).astype(int)
        return values.reshape(shape, order='F')
    if 'ndarray' in dct and 'imag' in dct:
        # complex array: separate real and imaginary buffers
        shape = decode_arr(dct['shape']).astype(int)
        values = decode_arr(dct['real']) + 1j * decode_arr(dct['imag'])
        return values.reshape(shape, order='F')
    if 'real' in dct and 'imag' in dct:
        return complex(dct['real'], dct['imag'])
    return dct
MATLAB_FOLDER = '%s/matlab' % os.path.realpath(os.path.dirname(__file__))
class _Session(object):
    """
    A class for communicating with a MATLAB session. It provides the behavior
    common across different MATLAB implementations. You shouldn't instantiate
    this directly; rather, use the Matlab or Octave subclasses.
    """

    def __init__(self, executable, socket_addr=None,
                 id='python-matlab-bridge', log=False, maxtime=60,
                 platform=None, startup_options=None):
        """
        Initialize this thing.

        Parameters
        ----------
        executable : str
            A string that would start the session at the terminal.
        socket_addr : str
            A string that represents a valid ZMQ socket address, such as
            "ipc:///tmp/pymatbridge", "tcp://127.0.0.1:55555", etc. Default is
            to choose a random IPC file name, or a random socket (for TCP).
        id : str
            An identifier for this instance of the pymatbridge.
        log : bool
            Whether to save a log file in some known location.
        maxtime : float
            The maximal time to wait for a response from the session
            (optional, default is 60 sec).
        platform : string
            The OS of the machine on which this is running. Per default this
            will be taken from sys.platform.
        startup_options : string
            Command line options to include in the executable's invocation.
            Optional; sensible defaults are used if this is not provided.
        """
        self.started = False
        self.executable = executable
        self.socket_addr = socket_addr
        self.id = id
        self.log = log
        self.maxtime = maxtime
        self.platform = platform if platform is not None else sys.platform
        self.startup_options = startup_options
        if socket_addr is None:
            # Windows has no IPC transport; use TCP there (port picked in start())
            self.socket_addr = "tcp://127.0.0.1" if self.platform == "win32" else "ipc:///tmp/pymatbridge-%s" % str(uuid4())
        if self.log:
            # BUG FIX: this previously appended to the *local* variable after
            # self.startup_options had already been assigned, so the shell
            # redirect was silently dropped when log=True.
            self.startup_options += ' > ./pymatbridge/logs/bashlog_%s.txt' % self.id
        self.context = None
        self.socket = None
        # best-effort shutdown of the server at interpreter exit
        atexit.register(self.stop)

    def _program_name(self):  # pragma: no cover
        """Human-readable program name; subclasses must override.

        BUG FIX: was `raise NotImplemented`, which raises TypeError at
        runtime (NotImplemented is not an exception class).
        """
        raise NotImplementedError

    def _preamble_code(self):
        """Code run inside the session before the server starts (path + cwd)."""
        # suppress warnings while loading the path, in the case of
        # overshadowing a built-in function on a newer version of
        # Matlab (e.g. isrow)
        return ["old_warning_state = warning('off','all');",
                "addpath(genpath('%s'));" % MATLAB_FOLDER,
                "warning(old_warning_state);",
                "clear('old_warning_state');",
                "cd('%s');" % os.getcwd()]

    def _execute_flag(self):  # pragma: no cover
        """Command-line flag that makes the program execute code; subclass hook."""
        raise NotImplementedError

    def _run_server(self):
        """Launch the MATLAB/Octave ZMQ server in a detached subprocess."""
        code = self._preamble_code()
        code.extend([
            "matlabserver('%s')" % self.socket_addr
        ])
        command = '%s %s %s "%s"' % (self.executable, self.startup_options,
                                     self._execute_flag(), ','.join(code))
        subprocess.Popen(command, shell=True, stdin=subprocess.PIPE,
                         stdout=subprocess.PIPE)

    # Start server/client session and make the connection
    def start(self):
        """Start the server process, connect the client and verify the link.

        Returns self on success; raises ValueError when the server fails to
        come up within `maxtime`.
        """
        # Setup socket
        self.context = zmq.Context()
        self.socket = self.context.socket(zmq.REQ)

        if self.platform == "win32":
            # TCP on Windows: pick a random port in the dynamic/private range
            rndport = random.randrange(49152, 65536)
            self.socket_addr = self.socket_addr + ":%s" % rndport

        # Start the MATLAB server in a new process
        print("Starting %s on ZMQ socket %s" % (self._program_name(), self.socket_addr))
        print("Send 'exit' command to kill the server")
        self._run_server()

        # Start the client
        self.socket.connect(self.socket_addr)
        self.started = True

        # Test if connection is established
        if self.is_connected():
            print("%s started and connected!" % self._program_name())
            self.set_plot_settings()
            return self
        else:
            raise ValueError("%s failed to start" % self._program_name())

    def _response(self, **kwargs):
        """Send a JSON-encoded request over the socket and return the raw reply."""
        req = json.dumps(kwargs, cls=PymatEncoder)
        self.socket.send_string(req)
        resp = self.socket.recv_string()
        return resp

    # Stop the Matlab server
    def stop(self):
        """Ask the server to exit; a no-op when the session never started."""
        if not self.started:
            return True
        # Matlab should respond with "exit" if successful
        if self._response(cmd='exit') == "exit":
            print("%s closed" % self._program_name())
            self.started = False
            return True

    # To test if the client can talk to the server
    def is_connected(self):
        """Poll the server until it replies 'connected' or `maxtime` elapses."""
        if not self.started:
            time.sleep(2)
            return False

        req = json.dumps(dict(cmd="connect"), cls=PymatEncoder)
        self.socket.send_string(req)

        start_time = time.time()
        while True:
            try:
                resp = self.socket.recv_string(flags=zmq.NOBLOCK)
                return resp == "connected"
            except zmq.ZMQError:
                # server not up yet; print a progress dot and retry
                sys.stdout.write('.')
                time.sleep(1)
                if time.time() - start_time > self.maxtime:
                    print("%s session timed out after %d seconds" % (self._program_name(), self.maxtime))
                    return False

    def is_function_processor_working(self):
        """Round-trip a test function call to verify the server executes code."""
        result = self.run_func('%s/usrprog/test_sum.m' % MATLAB_FOLDER,
                               {'echo': '%s: Function processor is working!' % self._program_name()})
        return result['success']

    def _json_response(self, **kwargs):
        """Send a request and decode the JSON reply (arrays/complex restored)."""
        return json.loads(self._response(**kwargs), object_hook=decode_pymat)

    def run_func(self, func_path, *func_args, **kwargs):
        """Run a function in Matlab and return the result.

        Parameters
        ----------
        func_path: str
            Name of function to run or a path to an m-file.
        func_args: object, optional
            Function args to send to the function.
        nargout: int, optional
            Desired number of return arguments.
        kwargs:
            Keyword arguments are passed to Matlab in the form [key, val] so
            that matlab.plot(x, y, '--', LineWidth=2) would be translated into
            plot(x, y, '--', 'LineWidth', 2)

        Returns
        -------
        Result dictionary with keys: 'message', 'result', and 'success'
        """
        if not self.started:
            raise ValueError('Session not started, use start()')
        nargout = kwargs.pop('nargout', 1)
        # flatten remaining kwargs into MATLAB's [key, val, key, val] form
        func_args += tuple(item for pair in zip(kwargs.keys(), kwargs.values())
                           for item in pair)
        dname = os.path.dirname(func_path)
        fname = os.path.basename(func_path)
        func_name, ext = os.path.splitext(fname)
        if ext and not ext == '.m':
            raise TypeError('Need to give path to .m file')
        return self._json_response(cmd='eval',
                                   func_name=func_name,
                                   func_args=func_args or '',
                                   dname=dname,
                                   nargout=nargout)

    def run_code(self, code):
        """Run some code in Matlab command line provide by a string

        Parameters
        ----------
        code : str
            Code to send for evaluation.
        """
        return self.run_func('evalin', 'base', code, nargout=0)

    def get_variable(self, varname, default=None):
        """Fetch `varname` from the Matlab base workspace, or `default`."""
        resp = self.run_func('evalin', 'base', varname)
        return resp['result'] if resp['success'] else default

    def set_variable(self, varname, value):
        """Assign `value` to `varname` in the Matlab base workspace."""
        if isinstance(value, spmatrix):
            return self._set_sparse_variable(varname, value)
        return self.run_func('assignin', 'base', varname, value, nargout=0)

    def set_plot_settings(self, width=512, height=384, inline=True):
        """Configure default figure visibility and paper size (150 dpi)."""
        if inline:
            code = ["set(0, 'defaultfigurevisible', 'off')"]
        else:
            code = ["set(0, 'defaultfigurevisible', 'on')"]
        size = "set(0, 'defaultfigurepaperposition', [0 0 %s %s])"
        code += ["set(0, 'defaultfigurepaperunits', 'inches')",
                 "set(0, 'defaultfigureunits', 'inches')",
                 size % (int(width) / 150., int(height) / 150.)]
        self.run_code(';'.join(code))

    def _set_sparse_variable(self, varname, value):
        """Transfer a scipy sparse matrix by shipping its (key, value) pairs."""
        value = value.todok()
        prefix = 'pymatbridge_temp_sparse_%s_' % uuid4().hex
        self.set_variable(prefix + 'keys', list(value.keys()))
        # correct for 1-indexing in MATLAB
        self.run_code('{0}keys = {0}keys + 1;'.format(prefix))
        self.set_variable(prefix + 'values', list(value.values()))
        cmd = "{1} = sparse({0}keys(:, 1), {0}keys(:, 2), {0}values');"
        result = self.run_code(cmd.format(prefix, varname))
        self.run_code('clear {0}keys {0}values'.format(prefix))
        return result

    def __getattr__(self, name):
        """If an attribute is not found, try to create a bound method"""
        return self._bind_method(name)

    def _bind_method(self, name, unconditionally=False):
        """Generate a Matlab function and bind it to the instance

        This is where the magic happens. When an unknown attribute of the
        Matlab class is requested, it is assumed to be a call to a
        Matlab function, and is generated and bound to the instance.

        This works because getattr() falls back to __getattr__ only if no
        attributes of the requested name can be found through normal
        routes (__getattribute__, __dict__, class tree).

        bind_method first checks whether the requested name is a callable
        Matlab function before generating a binding.

        Parameters
        ----------
        name : str
            The name of the Matlab function to call
            e.g. 'sqrt', 'sum', 'svd', etc
        unconditionally : bool, optional
            Bind the method without performing
            checks. Used to bootstrap methods that are required and
            know to exist

        Returns
        -------
        MatlabFunction
            A reference to a newly bound MatlabFunction instance if the
            requested name is determined to be a callable function

        Raises
        ------
        AttributeError: if the requested name is not a callable
        Matlab function
        """
        # TODO: This does not work if the function is a mex function inside a folder of the same name
        exists = self.run_func('exist', name)['result'] in [2, 3, 5]
        if not unconditionally and not exists:
            raise AttributeError("'Matlab' object has no attribute '%s'" % name)

        # create a new method instance
        method_instance = MatlabFunction(weakref.ref(self), name)
        method_instance.__name__ = name

        # bind to the Matlab instance with a weakref (to avoid circular references)
        if sys.version.startswith('3'):
            method = types.MethodType(method_instance, weakref.ref(self))
        else:
            method = types.MethodType(method_instance, weakref.ref(self),
                                      _Session)
        setattr(self, name, method)
        return getattr(self, name)
class Matlab(_Session):
    """A MATLAB-flavored session; see _Session for the shared machinery."""

    def __init__(self, executable='matlab', socket_addr=None,
                 id='python-matlab-bridge', log=False, maxtime=60,
                 platform=None, startup_options=None):
        """Set up a MATLAB session (the process starts on .start()).

        Parameters
        ----------
        executable : str
            Command that launches MATLAB from a terminal (default 'matlab',
            so you can alias it in your shell setup).
        socket_addr : str
            A valid ZMQ socket address, such as "ipc:///tmp/pymatbridge" or
            "tcp://127.0.0.1:55555".
        id : str
            An identifier for this instance of the pymatbridge.
        log : bool
            Whether to save a log file in some known location.
        maxtime : float
            The longest time to wait for a MATLAB response, in seconds.
        platform : string
            The OS of this machine; taken from sys.platform when not given.
        startup_options : string
            Command line options to pass to MATLAB; sensible defaults are
            used when omitted.
        """
        platform = platform if platform is not None else sys.platform
        if startup_options is None:
            # the COM automation flag is required on Windows; headless elsewhere
            startup_options = (' -automation -nosplash' if platform == 'win32'
                               else ' -nodesktop -nosplash')
        if log:
            startup_options += ' -logfile ./pymatbridge/logs/matlablog_%s.txt' % id
        super(Matlab, self).__init__(executable, socket_addr, id, log, maxtime,
                                     platform, startup_options)

    def _program_name(self):
        return 'MATLAB'

    def _execute_flag(self):
        return '-r'
class Octave(_Session):
    """An Octave-flavored session; see _Session for the shared machinery."""

    def __init__(self, executable='octave', socket_addr=None,
                 id='python-matlab-bridge', log=False, maxtime=60,
                 platform=None, startup_options=None):
        """Set up an Octave session (the process starts on .start()).

        Parameters
        ----------
        executable : str
            Command that launches Octave from a terminal (default 'octave',
            so you can alias it in your shell setup).
        socket_addr : str
            A valid ZMQ socket address, such as "ipc:///tmp/pymatbridge" or
            "tcp://127.0.0.1:55555".
        id : str
            An identifier for this instance of the pymatbridge.
        log : bool
            Whether to save a log file in some known location.
        maxtime : float
            The longest time to wait for an Octave response, in seconds.
        platform : string
            The OS of this machine; taken from sys.platform when not given.
        startup_options : string
            Command line options to pass to Octave; sensible defaults are
            used when omitted.
        """
        if startup_options is None:
            startup_options = '--silent --no-gui'
        super(Octave, self).__init__(executable, socket_addr, id, log, maxtime,
                                     platform, startup_options)

    def _program_name(self):
        return 'Octave'

    def _preamble_code(self):
        """Extend the shared preamble with a diary (when logging) and gnuplot."""
        code = super(Octave, self)._preamble_code()
        if self.log:
            code.append("diary('./pymatbridge/logs/octavelog_%s.txt')" % self.id)
        code.append("graphics_toolkit('gnuplot')")
        return code

    def _execute_flag(self):
        return '--eval'
class MatlabFunction(object):
    """A callable proxy for a function living in the MATLAB subprocess.

    Instances are created lazily by _Session._bind_method when an unknown
    attribute is requested, and hold only the target function's name plus a
    weak reference to the owning session.
    """

    def __init__(self, parent, name):
        """Store the target function's name and a weakref to its session.

        Parameters
        ----------
        parent : weakref.ref
            Weak reference to the owning Matlab/Octave session (weak, to
            avoid a circular reference keeping the session alive).
        name : str
            The name of the Matlab function this represents.
        """
        self.name = name
        self._parent = parent
        self.doc = None  # lazily-fetched MATLAB help text (see __doc__)

    def __call__(self, unused_parent_weakref, *args, **kwargs):
        """Invoke the MATLAB function in the subprocess via run_func."""
        return self.parent.run_func(self.name, *args, **kwargs)

    @property
    def parent(self):
        """Dereference the stored weakref, failing loudly if the session died."""
        session = self._parent()
        if session is None:
            raise AttributeError('Stale reference to attribute of non-existent Matlab object')
        return session

    @property
    def __doc__(self):
        """MATLAB's help() text for this function, fetched once and cached."""
        if self.doc is None:
            self.doc = self.parent.help(self.name)['result']
        return self.doc
|
arokem/python-matlab-bridge | pymatbridge/pymatbridge.py | decode_arr | python | def decode_arr(data):
data = data.encode('utf-8')
return frombuffer(base64.b64decode(data), float64) | Extract a numpy array from a base64 buffer | train | https://github.com/arokem/python-matlab-bridge/blob/9822c7b55435662f4f033c5479cc03fea2255755/pymatbridge/pymatbridge.py#L88-L91 | null | """
pymatbridge
===========
This is a module for communicating and running Matlab from within python
Example
-------
>>> import pymatbridge
>>> m = pymatbridge.Matlab()
>>> m.start()
Starting MATLAB on ZMQ socket ipc:///tmp/pymatbridge
Send 'exit' command to kill the server
.MATLAB started and connected!
True
>>> m.run_code('a=1;')
{'content': {'stdout': '', 'datadir': '/private/tmp/MatlabData/', 'code': 'a=1;', 'figures': []}, 'success': True}
>>> m.get_variable('a')
1
"""
import atexit
import os
import time
import base64
import zmq
import subprocess
import sys
import json
import types
import weakref
import random
from uuid import uuid4
from numpy import ndarray, generic, float64, frombuffer, asfortranarray
try:
from scipy.sparse import spmatrix
except ImportError:
class spmatrix:
pass
def encode_ndarray(obj):
    """Serialize a numpy array to a (base64 data, MATLAB-style shape) pair.

    The byte layout is chosen so that reshaping with order='F' on the
    receiving side recovers the original array; 1-D vectors are reported
    as 1xN row matrices.
    """
    shape = obj.shape
    if len(shape) == 1:
        shape = (1, obj.shape[0])
    # arrange the data column-major (Fortran order) for MATLAB
    if obj.flags.c_contiguous:
        obj = obj.T
    elif not obj.flags.f_contiguous:
        obj = asfortranarray(obj.T)
    else:
        obj = obj.T
    try:
        buf = obj.astype(float64).tobytes()
    except AttributeError:
        # numpy < 1.9 spelled tobytes() as tostring()
        buf = obj.astype(float64).tostring()
    return base64.b64encode(buf).decode('utf-8'), shape
# JSON encoder extension to handle complex numbers and numpy arrays
class PymatEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, ndarray) and obj.dtype.kind in 'uif':
data, shape = encode_ndarray(obj)
return {'ndarray': True, 'shape': shape, 'data': data}
elif isinstance(obj, ndarray) and obj.dtype.kind == 'c':
real, shape = encode_ndarray(obj.real.copy())
imag, _ = encode_ndarray(obj.imag.copy())
return {'ndarray': True, 'shape': shape,
'real': real, 'imag': imag}
elif isinstance(obj, ndarray):
return obj.tolist()
elif isinstance(obj, complex):
return {'real': obj.real, 'imag': obj.imag}
elif isinstance(obj, generic):
return obj.item()
# Handle the default case
return json.JSONEncoder.default(self, obj)
# JSON decoder for arrays and complex numbers
def decode_pymat(dct):
if 'ndarray' in dct and 'data' in dct:
value = decode_arr(dct['data'])
shape = dct['shape']
if type(dct['shape']) is not list:
shape = decode_arr(dct['shape']).astype(int)
return value.reshape(shape, order='F')
elif 'ndarray' in dct and 'imag' in dct:
real = decode_arr(dct['real'])
imag = decode_arr(dct['imag'])
shape = decode_arr(dct['shape']).astype(int)
data = real + 1j * imag
return data.reshape(shape, order='F')
elif 'real' in dct and 'imag' in dct:
return complex(dct['real'], dct['imag'])
return dct
MATLAB_FOLDER = '%s/matlab' % os.path.realpath(os.path.dirname(__file__))
class _Session(object):
"""
A class for communicating with a MATLAB session. It provides the behavior
common across different MATLAB implementations. You shouldn't instantiate
this directly; rather, use the Matlab or Octave subclasses.
"""
def __init__(self, executable, socket_addr=None,
id='python-matlab-bridge', log=False, maxtime=60,
platform=None, startup_options=None):
"""
Initialize this thing.
Parameters
----------
executable : str
A string that would start the session at the terminal.
socket_addr : str
A string that represents a valid ZMQ socket address, such as
"ipc:///tmp/pymatbridge", "tcp://127.0.0.1:55555", etc. Default is
to choose a random IPC file name, or a random socket (for TCP).
id : str
An identifier for this instance of the pymatbridge.
log : bool
Whether to save a log file in some known location.
maxtime : float
The maximal time to wait for a response from the session (optional,
Default is 10 sec)
platform : string
The OS of the machine on which this is running. Per default this
will be taken from sys.platform.
startup_options : string
Command line options to include in the executable's invocation.
Optional; sensible defaults are used if this is not provided.
"""
self.started = False
self.executable = executable
self.socket_addr = socket_addr
self.id = id
self.log = log
self.maxtime = maxtime
self.platform = platform if platform is not None else sys.platform
self.startup_options = startup_options
if socket_addr is None:
self.socket_addr = "tcp://127.0.0.1" if self.platform == "win32" else "ipc:///tmp/pymatbridge-%s"%str(uuid4())
if self.log:
startup_options += ' > ./pymatbridge/logs/bashlog_%s.txt' % self.id
self.context = None
self.socket = None
atexit.register(self.stop)
def _program_name(self): # pragma: no cover
raise NotImplemented
def _preamble_code(self):
# suppress warnings while loading the path, in the case of
# overshadowing a built-in function on a newer version of
# Matlab (e.g. isrow)
return ["old_warning_state = warning('off','all');",
"addpath(genpath('%s'));" % MATLAB_FOLDER,
"warning(old_warning_state);",
"clear('old_warning_state');",
"cd('%s');" % os.getcwd()]
def _execute_flag(self): # pragma: no cover
raise NotImplemented
def _run_server(self):
code = self._preamble_code()
code.extend([
"matlabserver('%s')" % self.socket_addr
])
command = '%s %s %s "%s"' % (self.executable, self.startup_options,
self._execute_flag(), ','.join(code))
subprocess.Popen(command, shell=True, stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
# Start server/client session and make the connection
def start(self):
# Setup socket
self.context = zmq.Context()
self.socket = self.context.socket(zmq.REQ)
if self.platform == "win32":
rndport = random.randrange(49152, 65536)
self.socket_addr = self.socket_addr + ":%s"%rndport
# Start the MATLAB server in a new process
print("Starting %s on ZMQ socket %s" % (self._program_name(), self.socket_addr))
print("Send 'exit' command to kill the server")
self._run_server()
# Start the client
self.socket.connect(self.socket_addr)
self.started = True
# Test if connection is established
if self.is_connected():
print("%s started and connected!" % self._program_name())
self.set_plot_settings()
return self
else:
raise ValueError("%s failed to start" % self._program_name())
def _response(self, **kwargs):
req = json.dumps(kwargs, cls=PymatEncoder)
self.socket.send_string(req)
resp = self.socket.recv_string()
return resp
# Stop the Matlab server
def stop(self):
if not self.started:
return True
# Matlab should respond with "exit" if successful
if self._response(cmd='exit') == "exit":
print("%s closed" % self._program_name())
self.started = False
return True
# To test if the client can talk to the server
def is_connected(self):
if not self.started:
time.sleep(2)
return False
req = json.dumps(dict(cmd="connect"), cls=PymatEncoder)
self.socket.send_string(req)
start_time = time.time()
while True:
try:
resp = self.socket.recv_string(flags=zmq.NOBLOCK)
return resp == "connected"
except zmq.ZMQError:
sys.stdout.write('.')
time.sleep(1)
if time.time() - start_time > self.maxtime:
print("%s session timed out after %d seconds" % (self._program_name(), self.maxtime))
return False
def is_function_processor_working(self):
result = self.run_func('%s/usrprog/test_sum.m' % MATLAB_FOLDER,
{'echo': '%s: Function processor is working!' % self._program_name()})
return result['success']
def _json_response(self, **kwargs):
return json.loads(self._response(**kwargs), object_hook=decode_pymat)
def run_func(self, func_path, *func_args, **kwargs):
"""Run a function in Matlab and return the result.
Parameters
----------
func_path: str
Name of function to run or a path to an m-file.
func_args: object, optional
Function args to send to the function.
nargout: int, optional
Desired number of return arguments.
kwargs:
Keyword arguments are passed to Matlab in the form [key, val] so
that matlab.plot(x, y, '--', LineWidth=2) would be translated into
plot(x, y, '--', 'LineWidth', 2)
Returns
-------
Result dictionary with keys: 'message', 'result', and 'success'
"""
if not self.started:
raise ValueError('Session not started, use start()')
nargout = kwargs.pop('nargout', 1)
func_args += tuple(item for pair in zip(kwargs.keys(), kwargs.values())
for item in pair)
dname = os.path.dirname(func_path)
fname = os.path.basename(func_path)
func_name, ext = os.path.splitext(fname)
if ext and not ext == '.m':
raise TypeError('Need to give path to .m file')
return self._json_response(cmd='eval',
func_name=func_name,
func_args=func_args or '',
dname=dname,
nargout=nargout)
def run_code(self, code):
"""Run some code in Matlab command line provide by a string
Parameters
----------
code : str
Code to send for evaluation.
"""
return self.run_func('evalin', 'base', code, nargout=0)
def get_variable(self, varname, default=None):
resp = self.run_func('evalin', 'base', varname)
return resp['result'] if resp['success'] else default
def set_variable(self, varname, value):
if isinstance(value, spmatrix):
return self._set_sparse_variable(varname, value)
return self.run_func('assignin', 'base', varname, value, nargout=0)
def set_plot_settings(self, width=512, height=384, inline=True):
if inline:
code = ["set(0, 'defaultfigurevisible', 'off')"]
else:
code = ["set(0, 'defaultfigurevisible', 'on')"]
size = "set(0, 'defaultfigurepaperposition', [0 0 %s %s])"
code += ["set(0, 'defaultfigurepaperunits', 'inches')",
"set(0, 'defaultfigureunits', 'inches')",
size % (int(width) / 150., int(height) / 150.)]
self.run_code(';'.join(code))
def _set_sparse_variable(self, varname, value):
value = value.todok()
prefix = 'pymatbridge_temp_sparse_%s_' % uuid4().hex
self.set_variable(prefix + 'keys', list(value.keys()))
# correct for 1-indexing in MATLAB
self.run_code('{0}keys = {0}keys + 1;'.format(prefix))
self.set_variable(prefix + 'values', list(value.values()))
cmd = "{1} = sparse({0}keys(:, 1), {0}keys(:, 2), {0}values');"
result = self.run_code(cmd.format(prefix, varname))
self.run_code('clear {0}keys {0}values'.format(prefix))
return result
def __getattr__(self, name):
"""If an attribute is not found, try to create a bound method"""
return self._bind_method(name)
def _bind_method(self, name, unconditionally=False):
"""Generate a Matlab function and bind it to the instance
This is where the magic happens. When an unknown attribute of the
Matlab class is requested, it is assumed to be a call to a
Matlab function, and is generated and bound to the instance.
This works because getattr() falls back to __getattr__ only if no
attributes of the requested name can be found through normal
routes (__getattribute__, __dict__, class tree).
bind_method first checks whether the requested name is a callable
Matlab function before generating a binding.
Parameters
----------
name : str
The name of the Matlab function to call
e.g. 'sqrt', 'sum', 'svd', etc
unconditionally : bool, optional
Bind the method without performing
checks. Used to bootstrap methods that are required and
know to exist
Returns
-------
MatlabFunction
A reference to a newly bound MatlabFunction instance if the
requested name is determined to be a callable function
Raises
------
AttributeError: if the requested name is not a callable
Matlab function
"""
# TODO: This does not work if the function is a mex function inside a folder of the same name
exists = self.run_func('exist', name)['result'] in [2, 3, 5]
if not unconditionally and not exists:
raise AttributeError("'Matlab' object has no attribute '%s'" % name)
# create a new method instance
method_instance = MatlabFunction(weakref.ref(self), name)
method_instance.__name__ = name
# bind to the Matlab instance with a weakref (to avoid circular references)
if sys.version.startswith('3'):
method = types.MethodType(method_instance, weakref.ref(self))
else:
method = types.MethodType(method_instance, weakref.ref(self),
_Session)
setattr(self, name, method)
return getattr(self, name)
class Matlab(_Session):
    """A MATLAB-backed session; see _Session for the shared behavior."""

    def __init__(self, executable='matlab', socket_addr=None,
                 id='python-matlab-bridge', log=False, maxtime=60,
                 platform=None, startup_options=None):
        """
        Initialize a MATLAB session.

        Parameters
        ----------
        executable : str
            Command that starts MATLAB at the terminal. Defaults to
            'matlab', so you can alias it in your shell setup.
        socket_addr : str
            A valid ZMQ socket address, such as "ipc:///tmp/pymatbridge"
            or "tcp://127.0.0.1:55555".
        id : str
            An identifier for this instance of the pymatbridge.
        log : bool
            Whether to save a log file in some known location.
        maxtime : float
            Maximal time to wait for a response from MATLAB (optional,
            default is 60 sec).
        platform : string
            The OS this is running on; taken from sys.platform by default.
        startup_options : string
            Command line options to pass to MATLAB. Optional; sensible
            defaults are used if this is not provided.
        """
        platform = sys.platform if platform is None else platform
        if startup_options is None:
            # Windows MATLAB wants COM automation; elsewhere run headless.
            per_platform = {'win32': ' -automation -nosplash'}
            startup_options = per_platform.get(platform,
                                               ' -nodesktop -nosplash')
        if log:
            startup_options += ' -logfile ./pymatbridge/logs/matlablog_%s.txt' % id
        super(Matlab, self).__init__(executable, socket_addr, id, log, maxtime,
                                     platform, startup_options)

    def _program_name(self):
        """Human-readable interpreter name."""
        return 'MATLAB'

    def _execute_flag(self):
        """MATLAB evaluates the startup command via the -r flag."""
        return '-r'
class Octave(_Session):
    """An Octave-backed session; see _Session for the shared behavior."""

    def __init__(self, executable='octave', socket_addr=None,
                 id='python-matlab-bridge', log=False, maxtime=60,
                 platform=None, startup_options=None):
        """
        Initialize an Octave session.

        Parameters
        ----------
        executable : str
            Command that starts Octave at the terminal. Defaults to
            'octave', so you can alias it in your shell setup.
        socket_addr : str
            A valid ZMQ socket address, such as "ipc:///tmp/pymatbridge"
            or "tcp://127.0.0.1:55555".
        id : str
            An identifier for this instance of the pymatbridge.
        log : bool
            Whether to save a log file in some known location.
        maxtime : float
            Maximal time to wait for a response from Octave (optional,
            default is 60 sec).
        platform : string
            The OS this is running on; taken from sys.platform by default.
        startup_options : string
            Command line options to pass to Octave. Optional; sensible
            defaults are used if this is not provided.
        """
        if startup_options is None:
            startup_options = '--silent --no-gui'
        super(Octave, self).__init__(executable, socket_addr, id, log,
                                     maxtime, platform, startup_options)

    def _program_name(self):
        """Human-readable interpreter name."""
        return 'Octave'

    def _preamble_code(self):
        """Shared preamble plus Octave-specific logging/plotting setup."""
        extra = []
        if self.log:
            extra.append("diary('./pymatbridge/logs/octavelog_%s.txt')" % self.id)
        extra.append("graphics_toolkit('gnuplot')")
        return super(Octave, self)._preamble_code() + extra

    def _execute_flag(self):
        """Octave evaluates the startup command via the --eval flag."""
        return '--eval'
class MatlabFunction(object):
    """A callable proxy for a single function in the Matlab subprocess.

    Instances are created on demand by _Session._bind_method and bound to
    the session object; calling one forwards to the session's run_func.
    """

    def __init__(self, parent, name):
        """Store the function name and a weak reference to the session.

        Parameters
        ----------
        parent: Matlab instance
            A reference (weakref.ref) to the parent Matlab instance to
            which the MatlabFunction is being bound.
        name: str
            The name of the Matlab function this represents.
        """
        self.name = name
        self._parent = parent
        self.doc = None  # lazily-fetched MATLAB help text, see __doc__

    def __call__(self, unused_parent_weakref, *args, **kwargs):
        """Invoke the Matlab function, forwarding everything to run_func."""
        return self.parent.run_func(self.name, *args, **kwargs)

    @property
    def parent(self):
        """Dereference the stored weakref to the owning session.

        The parent is held weakly to avoid circular references from
        dynamically binding methods onto the Matlab instance. Raises
        AttributeError when the session has been garbage-collected.
        """
        session = self._parent()
        if session is None:
            raise AttributeError('Stale reference to attribute of non-existent Matlab object')
        return session

    @property
    def __doc__(self):
        """Matlab's own help text for this function, fetched once and cached.

        The first access calls Matlab's builtin help(); subsequent reads
        are served from the cache so Matlab is only polled once.
        """
        if self.doc is None:
            self.doc = self.parent.help(self.name)['result']
        return self.doc
|
arokem/python-matlab-bridge | pymatbridge/pymatbridge.py | _Session.run_func | python | def run_func(self, func_path, *func_args, **kwargs):
if not self.started:
raise ValueError('Session not started, use start()')
nargout = kwargs.pop('nargout', 1)
func_args += tuple(item for pair in zip(kwargs.keys(), kwargs.values())
for item in pair)
dname = os.path.dirname(func_path)
fname = os.path.basename(func_path)
func_name, ext = os.path.splitext(fname)
if ext and not ext == '.m':
raise TypeError('Need to give path to .m file')
return self._json_response(cmd='eval',
func_name=func_name,
func_args=func_args or '',
dname=dname,
nargout=nargout) | Run a function in Matlab and return the result.
Parameters
----------
func_path: str
Name of function to run or a path to an m-file.
func_args: object, optional
Function args to send to the function.
nargout: int, optional
Desired number of return arguments.
kwargs:
Keyword arguments are passed to Matlab in the form [key, val] so
that matlab.plot(x, y, '--', LineWidth=2) would be translated into
plot(x, y, '--', 'LineWidth', 2)
Returns
-------
Result dictionary with keys: 'message', 'result', and 'success' | train | https://github.com/arokem/python-matlab-bridge/blob/9822c7b55435662f4f033c5479cc03fea2255755/pymatbridge/pymatbridge.py#L276-L311 | [
"def _json_response(self, **kwargs):\n return json.loads(self._response(**kwargs), object_hook=decode_pymat)\n"
] | class _Session(object):
"""
A class for communicating with a MATLAB session. It provides the behavior
common across different MATLAB implementations. You shouldn't instantiate
this directly; rather, use the Matlab or Octave subclasses.
"""
def __init__(self, executable, socket_addr=None,
id='python-matlab-bridge', log=False, maxtime=60,
platform=None, startup_options=None):
"""
Initialize this thing.
Parameters
----------
executable : str
A string that would start the session at the terminal.
socket_addr : str
A string that represents a valid ZMQ socket address, such as
"ipc:///tmp/pymatbridge", "tcp://127.0.0.1:55555", etc. Default is
to choose a random IPC file name, or a random socket (for TCP).
id : str
An identifier for this instance of the pymatbridge.
log : bool
Whether to save a log file in some known location.
maxtime : float
The maximal time to wait for a response from the session (optional,
Default is 10 sec)
platform : string
The OS of the machine on which this is running. Per default this
will be taken from sys.platform.
startup_options : string
Command line options to include in the executable's invocation.
Optional; sensible defaults are used if this is not provided.
"""
self.started = False
self.executable = executable
self.socket_addr = socket_addr
self.id = id
self.log = log
self.maxtime = maxtime
self.platform = platform if platform is not None else sys.platform
self.startup_options = startup_options
if socket_addr is None:
self.socket_addr = "tcp://127.0.0.1" if self.platform == "win32" else "ipc:///tmp/pymatbridge-%s"%str(uuid4())
if self.log:
startup_options += ' > ./pymatbridge/logs/bashlog_%s.txt' % self.id
self.context = None
self.socket = None
atexit.register(self.stop)
def _program_name(self): # pragma: no cover
raise NotImplemented
def _preamble_code(self):
# suppress warnings while loading the path, in the case of
# overshadowing a built-in function on a newer version of
# Matlab (e.g. isrow)
return ["old_warning_state = warning('off','all');",
"addpath(genpath('%s'));" % MATLAB_FOLDER,
"warning(old_warning_state);",
"clear('old_warning_state');",
"cd('%s');" % os.getcwd()]
def _execute_flag(self): # pragma: no cover
raise NotImplemented
def _run_server(self):
code = self._preamble_code()
code.extend([
"matlabserver('%s')" % self.socket_addr
])
command = '%s %s %s "%s"' % (self.executable, self.startup_options,
self._execute_flag(), ','.join(code))
subprocess.Popen(command, shell=True, stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
# Start server/client session and make the connection
def start(self):
# Setup socket
self.context = zmq.Context()
self.socket = self.context.socket(zmq.REQ)
if self.platform == "win32":
rndport = random.randrange(49152, 65536)
self.socket_addr = self.socket_addr + ":%s"%rndport
# Start the MATLAB server in a new process
print("Starting %s on ZMQ socket %s" % (self._program_name(), self.socket_addr))
print("Send 'exit' command to kill the server")
self._run_server()
# Start the client
self.socket.connect(self.socket_addr)
self.started = True
# Test if connection is established
if self.is_connected():
print("%s started and connected!" % self._program_name())
self.set_plot_settings()
return self
else:
raise ValueError("%s failed to start" % self._program_name())
def _response(self, **kwargs):
req = json.dumps(kwargs, cls=PymatEncoder)
self.socket.send_string(req)
resp = self.socket.recv_string()
return resp
# Stop the Matlab server
def stop(self):
if not self.started:
return True
# Matlab should respond with "exit" if successful
if self._response(cmd='exit') == "exit":
print("%s closed" % self._program_name())
self.started = False
return True
# To test if the client can talk to the server
def is_connected(self):
if not self.started:
time.sleep(2)
return False
req = json.dumps(dict(cmd="connect"), cls=PymatEncoder)
self.socket.send_string(req)
start_time = time.time()
while True:
try:
resp = self.socket.recv_string(flags=zmq.NOBLOCK)
return resp == "connected"
except zmq.ZMQError:
sys.stdout.write('.')
time.sleep(1)
if time.time() - start_time > self.maxtime:
print("%s session timed out after %d seconds" % (self._program_name(), self.maxtime))
return False
def is_function_processor_working(self):
result = self.run_func('%s/usrprog/test_sum.m' % MATLAB_FOLDER,
{'echo': '%s: Function processor is working!' % self._program_name()})
return result['success']
def _json_response(self, **kwargs):
return json.loads(self._response(**kwargs), object_hook=decode_pymat)
def run_code(self, code):
"""Run some code in Matlab command line provide by a string
Parameters
----------
code : str
Code to send for evaluation.
"""
return self.run_func('evalin', 'base', code, nargout=0)
def get_variable(self, varname, default=None):
resp = self.run_func('evalin', 'base', varname)
return resp['result'] if resp['success'] else default
def set_variable(self, varname, value):
if isinstance(value, spmatrix):
return self._set_sparse_variable(varname, value)
return self.run_func('assignin', 'base', varname, value, nargout=0)
def set_plot_settings(self, width=512, height=384, inline=True):
if inline:
code = ["set(0, 'defaultfigurevisible', 'off')"]
else:
code = ["set(0, 'defaultfigurevisible', 'on')"]
size = "set(0, 'defaultfigurepaperposition', [0 0 %s %s])"
code += ["set(0, 'defaultfigurepaperunits', 'inches')",
"set(0, 'defaultfigureunits', 'inches')",
size % (int(width) / 150., int(height) / 150.)]
self.run_code(';'.join(code))
def _set_sparse_variable(self, varname, value):
value = value.todok()
prefix = 'pymatbridge_temp_sparse_%s_' % uuid4().hex
self.set_variable(prefix + 'keys', list(value.keys()))
# correct for 1-indexing in MATLAB
self.run_code('{0}keys = {0}keys + 1;'.format(prefix))
self.set_variable(prefix + 'values', list(value.values()))
cmd = "{1} = sparse({0}keys(:, 1), {0}keys(:, 2), {0}values');"
result = self.run_code(cmd.format(prefix, varname))
self.run_code('clear {0}keys {0}values'.format(prefix))
return result
def __getattr__(self, name):
"""If an attribute is not found, try to create a bound method"""
return self._bind_method(name)
def _bind_method(self, name, unconditionally=False):
"""Generate a Matlab function and bind it to the instance
This is where the magic happens. When an unknown attribute of the
Matlab class is requested, it is assumed to be a call to a
Matlab function, and is generated and bound to the instance.
This works because getattr() falls back to __getattr__ only if no
attributes of the requested name can be found through normal
routes (__getattribute__, __dict__, class tree).
bind_method first checks whether the requested name is a callable
Matlab function before generating a binding.
Parameters
----------
name : str
The name of the Matlab function to call
e.g. 'sqrt', 'sum', 'svd', etc
unconditionally : bool, optional
Bind the method without performing
checks. Used to bootstrap methods that are required and
know to exist
Returns
-------
MatlabFunction
A reference to a newly bound MatlabFunction instance if the
requested name is determined to be a callable function
Raises
------
AttributeError: if the requested name is not a callable
Matlab function
"""
# TODO: This does not work if the function is a mex function inside a folder of the same name
exists = self.run_func('exist', name)['result'] in [2, 3, 5]
if not unconditionally and not exists:
raise AttributeError("'Matlab' object has no attribute '%s'" % name)
# create a new method instance
method_instance = MatlabFunction(weakref.ref(self), name)
method_instance.__name__ = name
# bind to the Matlab instance with a weakref (to avoid circular references)
if sys.version.startswith('3'):
method = types.MethodType(method_instance, weakref.ref(self))
else:
method = types.MethodType(method_instance, weakref.ref(self),
_Session)
setattr(self, name, method)
return getattr(self, name)
|
arokem/python-matlab-bridge | pymatbridge/pymatbridge.py | _Session._bind_method | python | def _bind_method(self, name, unconditionally=False):
# TODO: This does not work if the function is a mex function inside a folder of the same name
exists = self.run_func('exist', name)['result'] in [2, 3, 5]
if not unconditionally and not exists:
raise AttributeError("'Matlab' object has no attribute '%s'" % name)
# create a new method instance
method_instance = MatlabFunction(weakref.ref(self), name)
method_instance.__name__ = name
# bind to the Matlab instance with a weakref (to avoid circular references)
if sys.version.startswith('3'):
method = types.MethodType(method_instance, weakref.ref(self))
else:
method = types.MethodType(method_instance, weakref.ref(self),
_Session)
setattr(self, name, method)
return getattr(self, name) | Generate a Matlab function and bind it to the instance
This is where the magic happens. When an unknown attribute of the
Matlab class is requested, it is assumed to be a call to a
Matlab function, and is generated and bound to the instance.
This works because getattr() falls back to __getattr__ only if no
attributes of the requested name can be found through normal
routes (__getattribute__, __dict__, class tree).
bind_method first checks whether the requested name is a callable
Matlab function before generating a binding.
Parameters
----------
name : str
The name of the Matlab function to call
e.g. 'sqrt', 'sum', 'svd', etc
unconditionally : bool, optional
Bind the method without performing
checks. Used to bootstrap methods that are required and
know to exist
Returns
-------
MatlabFunction
A reference to a newly bound MatlabFunction instance if the
requested name is determined to be a callable function
Raises
------
AttributeError: if the requested name is not a callable
Matlab function | train | https://github.com/arokem/python-matlab-bridge/blob/9822c7b55435662f4f033c5479cc03fea2255755/pymatbridge/pymatbridge.py#L359-L411 | [
"def run_func(self, func_path, *func_args, **kwargs):\n \"\"\"Run a function in Matlab and return the result.\n\n Parameters\n ----------\n func_path: str\n Name of function to run or a path to an m-file.\n func_args: object, optional\n Function args to send to the function.\n nargou... | class _Session(object):
"""
A class for communicating with a MATLAB session. It provides the behavior
common across different MATLAB implementations. You shouldn't instantiate
this directly; rather, use the Matlab or Octave subclasses.
"""
def __init__(self, executable, socket_addr=None,
id='python-matlab-bridge', log=False, maxtime=60,
platform=None, startup_options=None):
"""
Initialize this thing.
Parameters
----------
executable : str
A string that would start the session at the terminal.
socket_addr : str
A string that represents a valid ZMQ socket address, such as
"ipc:///tmp/pymatbridge", "tcp://127.0.0.1:55555", etc. Default is
to choose a random IPC file name, or a random socket (for TCP).
id : str
An identifier for this instance of the pymatbridge.
log : bool
Whether to save a log file in some known location.
maxtime : float
The maximal time to wait for a response from the session (optional,
Default is 10 sec)
platform : string
The OS of the machine on which this is running. Per default this
will be taken from sys.platform.
startup_options : string
Command line options to include in the executable's invocation.
Optional; sensible defaults are used if this is not provided.
"""
self.started = False
self.executable = executable
self.socket_addr = socket_addr
self.id = id
self.log = log
self.maxtime = maxtime
self.platform = platform if platform is not None else sys.platform
self.startup_options = startup_options
if socket_addr is None:
self.socket_addr = "tcp://127.0.0.1" if self.platform == "win32" else "ipc:///tmp/pymatbridge-%s"%str(uuid4())
if self.log:
startup_options += ' > ./pymatbridge/logs/bashlog_%s.txt' % self.id
self.context = None
self.socket = None
atexit.register(self.stop)
def _program_name(self): # pragma: no cover
raise NotImplemented
def _preamble_code(self):
# suppress warnings while loading the path, in the case of
# overshadowing a built-in function on a newer version of
# Matlab (e.g. isrow)
return ["old_warning_state = warning('off','all');",
"addpath(genpath('%s'));" % MATLAB_FOLDER,
"warning(old_warning_state);",
"clear('old_warning_state');",
"cd('%s');" % os.getcwd()]
def _execute_flag(self): # pragma: no cover
raise NotImplemented
def _run_server(self):
code = self._preamble_code()
code.extend([
"matlabserver('%s')" % self.socket_addr
])
command = '%s %s %s "%s"' % (self.executable, self.startup_options,
self._execute_flag(), ','.join(code))
subprocess.Popen(command, shell=True, stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
# Start server/client session and make the connection
def start(self):
# Setup socket
self.context = zmq.Context()
self.socket = self.context.socket(zmq.REQ)
if self.platform == "win32":
rndport = random.randrange(49152, 65536)
self.socket_addr = self.socket_addr + ":%s"%rndport
# Start the MATLAB server in a new process
print("Starting %s on ZMQ socket %s" % (self._program_name(), self.socket_addr))
print("Send 'exit' command to kill the server")
self._run_server()
# Start the client
self.socket.connect(self.socket_addr)
self.started = True
# Test if connection is established
if self.is_connected():
print("%s started and connected!" % self._program_name())
self.set_plot_settings()
return self
else:
raise ValueError("%s failed to start" % self._program_name())
def _response(self, **kwargs):
req = json.dumps(kwargs, cls=PymatEncoder)
self.socket.send_string(req)
resp = self.socket.recv_string()
return resp
# Stop the Matlab server
def stop(self):
if not self.started:
return True
# Matlab should respond with "exit" if successful
if self._response(cmd='exit') == "exit":
print("%s closed" % self._program_name())
self.started = False
return True
# To test if the client can talk to the server
def is_connected(self):
if not self.started:
time.sleep(2)
return False
req = json.dumps(dict(cmd="connect"), cls=PymatEncoder)
self.socket.send_string(req)
start_time = time.time()
while True:
try:
resp = self.socket.recv_string(flags=zmq.NOBLOCK)
return resp == "connected"
except zmq.ZMQError:
sys.stdout.write('.')
time.sleep(1)
if time.time() - start_time > self.maxtime:
print("%s session timed out after %d seconds" % (self._program_name(), self.maxtime))
return False
def is_function_processor_working(self):
result = self.run_func('%s/usrprog/test_sum.m' % MATLAB_FOLDER,
{'echo': '%s: Function processor is working!' % self._program_name()})
return result['success']
def _json_response(self, **kwargs):
return json.loads(self._response(**kwargs), object_hook=decode_pymat)
def run_func(self, func_path, *func_args, **kwargs):
"""Run a function in Matlab and return the result.
Parameters
----------
func_path: str
Name of function to run or a path to an m-file.
func_args: object, optional
Function args to send to the function.
nargout: int, optional
Desired number of return arguments.
kwargs:
Keyword arguments are passed to Matlab in the form [key, val] so
that matlab.plot(x, y, '--', LineWidth=2) would be translated into
plot(x, y, '--', 'LineWidth', 2)
Returns
-------
Result dictionary with keys: 'message', 'result', and 'success'
"""
if not self.started:
raise ValueError('Session not started, use start()')
nargout = kwargs.pop('nargout', 1)
func_args += tuple(item for pair in zip(kwargs.keys(), kwargs.values())
for item in pair)
dname = os.path.dirname(func_path)
fname = os.path.basename(func_path)
func_name, ext = os.path.splitext(fname)
if ext and not ext == '.m':
raise TypeError('Need to give path to .m file')
return self._json_response(cmd='eval',
func_name=func_name,
func_args=func_args or '',
dname=dname,
nargout=nargout)
def run_code(self, code):
"""Run some code in Matlab command line provide by a string
Parameters
----------
code : str
Code to send for evaluation.
"""
return self.run_func('evalin', 'base', code, nargout=0)
def get_variable(self, varname, default=None):
resp = self.run_func('evalin', 'base', varname)
return resp['result'] if resp['success'] else default
def set_variable(self, varname, value):
if isinstance(value, spmatrix):
return self._set_sparse_variable(varname, value)
return self.run_func('assignin', 'base', varname, value, nargout=0)
def set_plot_settings(self, width=512, height=384, inline=True):
if inline:
code = ["set(0, 'defaultfigurevisible', 'off')"]
else:
code = ["set(0, 'defaultfigurevisible', 'on')"]
size = "set(0, 'defaultfigurepaperposition', [0 0 %s %s])"
code += ["set(0, 'defaultfigurepaperunits', 'inches')",
"set(0, 'defaultfigureunits', 'inches')",
size % (int(width) / 150., int(height) / 150.)]
self.run_code(';'.join(code))
def _set_sparse_variable(self, varname, value):
value = value.todok()
prefix = 'pymatbridge_temp_sparse_%s_' % uuid4().hex
self.set_variable(prefix + 'keys', list(value.keys()))
# correct for 1-indexing in MATLAB
self.run_code('{0}keys = {0}keys + 1;'.format(prefix))
self.set_variable(prefix + 'values', list(value.values()))
cmd = "{1} = sparse({0}keys(:, 1), {0}keys(:, 2), {0}values');"
result = self.run_code(cmd.format(prefix, varname))
self.run_code('clear {0}keys {0}values'.format(prefix))
return result
def __getattr__(self, name):
"""If an attribute is not found, try to create a bound method"""
# Only reached when normal attribute lookup fails. Delegates to
# _bind_method, which presumably builds a callable proxy for the named
# Matlab function (definition not visible here -- confirm).
return self._bind_method(name)
|
# arokem/python-matlab-bridge | pymatbridge/publish.py | format_line | python
def format_line(line):
    """Classify one Matlab source line for notebook conversion.

    Returns a ``(new_cell, md, source)`` triple: ``new_cell`` is True for
    a '%%' cell-break marker, ``md`` is True for any comment line (which
    becomes markdown), and ``source`` is the text to store in the cell.
    """
    if line.startswith('%%'):
        # '%%' opens a fresh markdown cell; an extra newline is appended
        # because line-breaks in markdown require a blank line.
        return True, True, line.split('%%')[1] + '\n'
    if line.startswith('%'):
        # An ordinary comment: markdown content in the current cell.
        return False, True, line.split('%')[1] + '\n'
    # Anything else is treated as literal Matlab code, passed through.
    return False, False, line
Parameters
----------
line : str
The line of code to be formatted. Formatting occurs according to the
following rules:
- If the line starts with (at least) two %% signs, a new cell will be
started.
- If the line doesn't start with a '%' sign, it is assumed to be legit
matlab code. We will continue to add to the same cell until reaching
the next comment line | train | https://github.com/arokem/python-matlab-bridge/blob/9822c7b55435662f4f033c5479cc03fea2255755/pymatbridge/publish.py#L11-L45 | null | try:
import nbformat.v4 as nbformat
from nbformat import write as nbwrite
except ImportError:
import IPython.nbformat.v4 as nbformat
from IPython.nbformat import write as nbwrite
import numpy as np
def mfile_to_lines(mfile):
"""
Read the lines from an mfile
Parameters
----------
mfile : string
Full path to an m file
"""
# We should only be able to read this file:
with open(mfile) as fid:
return fid.readlines()
def lines_to_notebook(lines, name=None):
"""
Convert the lines of an m file into an IPython notebook
Parameters
----------
lines : list
A list of strings. Each element is a line in the m file
Returns
-------
notebook : an IPython NotebookNode class instance, containing the
information required to create a file
"""
source = []
md = np.empty(len(lines), dtype=object)
new_cell = np.empty(len(lines), dtype=object)
for idx, l in enumerate(lines):
new_cell[idx], md[idx], this_source = format_line(l)
# Transitions between markdown and code and vice-versa merit a new
# cell, even if no newline, or "%%" is found. Make sure not to do this
# check for the very first line!
if idx>1 and not new_cell[idx]:
if md[idx] != md[idx-1]:
new_cell[idx] = True
source.append(this_source)
# This defines the breaking points between cells:
new_cell_idx = np.hstack([np.where(new_cell)[0], -1])
# Listify the sources:
cell_source = [source[new_cell_idx[i]:new_cell_idx[i+1]]
for i in range(len(new_cell_idx)-1)]
cell_md = [md[new_cell_idx[i]] for i in range(len(new_cell_idx)-1)]
cells = []
# Append the notebook with loading matlab magic extension
notebook_head = "import pymatbridge as pymat\n" + "ip = get_ipython()\n" \
+ "pymat.load_ipython_extension(ip)"
cells.append(nbformat.new_code_cell(notebook_head))#, language='python'))
for cell_idx, cell_s in enumerate(cell_source):
if cell_md[cell_idx]:
cells.append(nbformat.new_markdown_cell(cell_s))
else:
cell_s.insert(0, '%%matlab\n')
cells.append(nbformat.new_code_cell(cell_s))#, language='matlab'))
#ws = nbformat.new_worksheet(cells=cells)
notebook = nbformat.new_notebook(cells=cells)
return notebook
def convert_mfile(mfile, outfile=None):
"""
Convert a Matlab m-file into a Matlab notebook in ipynb format
Parameters
----------
mfile : string
Full path to a matlab m file to convert
outfile : string (optional)
Full path to the output ipynb file
"""
lines = mfile_to_lines(mfile)
nb = lines_to_notebook(lines)
if outfile is None:
outfile = mfile.split('.m')[0] + '.ipynb'
with open(outfile, 'w') as fid:
nbwrite(nb, fid)
|
arokem/python-matlab-bridge | pymatbridge/publish.py | lines_to_notebook | python | def lines_to_notebook(lines, name=None):
# Convert the lines of an m-file into an IPython notebook: classify each
# line as markdown or code, group consecutive same-kind lines into cells,
# and assemble an nbformat notebook. ('name' is accepted but unused.)
source = []
# Per-line flags: md[i] -> line i is markdown; new_cell[i] -> line i
# starts a new cell (set for '%%' markers by format_line).
md = np.empty(len(lines), dtype=object)
new_cell = np.empty(len(lines), dtype=object)
for idx, l in enumerate(lines):
new_cell[idx], md[idx], this_source = format_line(l)
# Transitions between markdown and code and vice-versa merit a new
# cell, even if no newline, or "%%" is found. Make sure not to do this
# check for the very first line!
if idx>1 and not new_cell[idx]:
# NOTE(review): this guard skips idx == 1 as well as idx == 0; if only
# the very first line should be exempt (as the comment above says),
# the condition may want to be idx > 0 -- confirm intended behavior.
if md[idx] != md[idx-1]:
new_cell[idx] = True
source.append(this_source)
# This defines the breaking points between cells:
new_cell_idx = np.hstack([np.where(new_cell)[0], -1])
# NOTE(review): with the trailing -1 sentinel, the last slice below is
# source[last_marker:-1], which excludes the final line -- verify.
# Listify the sources:
cell_source = [source[new_cell_idx[i]:new_cell_idx[i+1]]
for i in range(len(new_cell_idx)-1)]
cell_md = [md[new_cell_idx[i]] for i in range(len(new_cell_idx)-1)]
cells = []
# Append the notebook with loading matlab magic extension
notebook_head = "import pymatbridge as pymat\n" + "ip = get_ipython()\n" \
+ "pymat.load_ipython_extension(ip)"
cells.append(nbformat.new_code_cell(notebook_head))#, language='python'))
for cell_idx, cell_s in enumerate(cell_source):
if cell_md[cell_idx]:
cells.append(nbformat.new_markdown_cell(cell_s))
else:
# Prepend the cell magic so the notebook routes this cell to Matlab.
cell_s.insert(0, '%%matlab\n')
cells.append(nbformat.new_code_cell(cell_s))#, language='matlab'))
#ws = nbformat.new_worksheet(cells=cells)
notebook = nbformat.new_notebook(cells=cells)
return notebook
Parameters
----------
lines : list
A list of strings. Each element is a line in the m file
Returns
-------
notebook : an IPython NotebookNode class instance, containing the
information required to create a file | train | https://github.com/arokem/python-matlab-bridge/blob/9822c7b55435662f4f033c5479cc03fea2255755/pymatbridge/publish.py#L62-L114 | [
"def format_line(line):\n \"\"\"\n Format a line of Matlab into either a markdown line or a code line.\n\n Parameters\n ----------\n line : str\n The line of code to be formatted. Formatting occurs according to the\n following rules:\n\n - If the line starts with (at least) two %... | try:
import nbformat.v4 as nbformat
from nbformat import write as nbwrite
except ImportError:
import IPython.nbformat.v4 as nbformat
from IPython.nbformat import write as nbwrite
import numpy as np
def format_line(line):
"""
Format a line of Matlab into either a markdown line or a code line.
Parameters
----------
line : str
The line of code to be formatted. Formatting occurs according to the
following rules:
- If the line starts with (at least) two %% signs, a new cell will be
started.
- If the line doesn't start with a '%' sign, it is assumed to be legit
matlab code. We will continue to add to the same cell until reaching
the next comment line
"""
if line.startswith('%%'):
md = True
new_cell = True
source = line.split('%%')[1] + '\n' # line-breaks in md require a line
# gap!
elif line.startswith('%'):
md = True
new_cell = False
source = line.split('%')[1] + '\n'
else:
md = False
new_cell = False
source = line
return new_cell, md, source
def mfile_to_lines(mfile):
"""
Read the lines from an mfile
Parameters
----------
mfile : string
Full path to an m file
"""
# We should only be able to read this file:
with open(mfile) as fid:
return fid.readlines()
def convert_mfile(mfile, outfile=None):
"""
Convert a Matlab m-file into a Matlab notebook in ipynb format
Parameters
----------
mfile : string
Full path to a matlab m file to convert
outfile : string (optional)
Full path to the output ipynb file
"""
lines = mfile_to_lines(mfile)
nb = lines_to_notebook(lines)
if outfile is None:
outfile = mfile.split('.m')[0] + '.ipynb'
with open(outfile, 'w') as fid:
nbwrite(nb, fid)
|
# arokem/python-matlab-bridge | pymatbridge/publish.py | convert_mfile | python
def convert_mfile(mfile, outfile=None):
    """Convert a Matlab m-file into a Matlab notebook in ipynb format.

    Parameters
    ----------
    mfile : string
        Full path to a matlab m file to convert
    outfile : string (optional)
        Full path to the output ipynb file; defaults to the input path
        with its extension replaced by '.ipynb'.
    """
    import os
    lines = mfile_to_lines(mfile)
    nb = lines_to_notebook(lines)
    if outfile is None:
        # os.path.splitext strips only the final extension. The previous
        # mfile.split('.m')[0] truncated at the *first* '.m' anywhere in
        # the path (e.g. a directory named 'foo.m'), corrupting the
        # output path.
        root, _ = os.path.splitext(mfile)
        outfile = root + '.ipynb'
    with open(outfile, 'w') as fid:
        nbwrite(nb, fid)
Parameters
----------
mfile : string
Full path to a matlab m file to convert
outfile : string (optional)
Full path to the output ipynb file | train | https://github.com/arokem/python-matlab-bridge/blob/9822c7b55435662f4f033c5479cc03fea2255755/pymatbridge/publish.py#L117-L135 | [
"def mfile_to_lines(mfile):\n \"\"\"\n Read the lines from an mfile\n\n Parameters\n ----------\n mfile : string\n Full path to an m file\n \"\"\"\n # We should only be able to read this file:\n with open(mfile) as fid:\n return fid.readlines()\n",
"def lines_to_notebook(line... | try:
import nbformat.v4 as nbformat
from nbformat import write as nbwrite
except ImportError:
import IPython.nbformat.v4 as nbformat
from IPython.nbformat import write as nbwrite
import numpy as np
def format_line(line):
"""
Format a line of Matlab into either a markdown line or a code line.
Parameters
----------
line : str
The line of code to be formatted. Formatting occurs according to the
following rules:
- If the line starts with (at least) two %% signs, a new cell will be
started.
- If the line doesn't start with a '%' sign, it is assumed to be legit
matlab code. We will continue to add to the same cell until reaching
the next comment line
"""
if line.startswith('%%'):
md = True
new_cell = True
source = line.split('%%')[1] + '\n' # line-breaks in md require a line
# gap!
elif line.startswith('%'):
md = True
new_cell = False
source = line.split('%')[1] + '\n'
else:
md = False
new_cell = False
source = line
return new_cell, md, source
def mfile_to_lines(mfile):
"""
Read the lines from an mfile
Parameters
----------
mfile : string
Full path to an m file
"""
# We should only be able to read this file:
with open(mfile) as fid:
return fid.readlines()
def lines_to_notebook(lines, name=None):
"""
Convert the lines of an m file into an IPython notebook
Parameters
----------
lines : list
A list of strings. Each element is a line in the m file
Returns
-------
notebook : an IPython NotebookNode class instance, containing the
information required to create a file
"""
source = []
md = np.empty(len(lines), dtype=object)
new_cell = np.empty(len(lines), dtype=object)
for idx, l in enumerate(lines):
new_cell[idx], md[idx], this_source = format_line(l)
# Transitions between markdown and code and vice-versa merit a new
# cell, even if no newline, or "%%" is found. Make sure not to do this
# check for the very first line!
if idx>1 and not new_cell[idx]:
if md[idx] != md[idx-1]:
new_cell[idx] = True
source.append(this_source)
# This defines the breaking points between cells:
new_cell_idx = np.hstack([np.where(new_cell)[0], -1])
# Listify the sources:
cell_source = [source[new_cell_idx[i]:new_cell_idx[i+1]]
for i in range(len(new_cell_idx)-1)]
cell_md = [md[new_cell_idx[i]] for i in range(len(new_cell_idx)-1)]
cells = []
# Append the notebook with loading matlab magic extension
notebook_head = "import pymatbridge as pymat\n" + "ip = get_ipython()\n" \
+ "pymat.load_ipython_extension(ip)"
cells.append(nbformat.new_code_cell(notebook_head))#, language='python'))
for cell_idx, cell_s in enumerate(cell_source):
if cell_md[cell_idx]:
cells.append(nbformat.new_markdown_cell(cell_s))
else:
cell_s.insert(0, '%%matlab\n')
cells.append(nbformat.new_code_cell(cell_s))#, language='matlab'))
#ws = nbformat.new_worksheet(cells=cells)
notebook = nbformat.new_notebook(cells=cells)
return notebook
|
# arokem/python-matlab-bridge | pymatbridge/matlab_magic.py | load_ipython_extension | python
def load_ipython_extension(ip, **kwargs):
    """Load the extension in IPython.

    Idempotent: once the magics are registered, later calls are no-ops.
    """
    global _loaded
    if _loaded:
        return
    ip.register_magics(MatlabMagics(ip, **kwargs))
    _loaded = True
matlab_magic
============
Magic command interface for interactive work with Matlab(R) via the pymatbridge
"""
from shutil import rmtree
import numpy as np
import IPython
from IPython.core.displaypub import publish_display_data
from IPython.core.magic import (Magics, magics_class,
line_cell_magic, needs_local_scope)
from IPython.core.magic_arguments import (argument, magic_arguments,
parse_argstring)
from IPython.utils.py3compat import unicode_to_str, PY3
import pymatbridge as pymat
from .compat import text_type
class MatlabInterperterError(RuntimeError):
"""
Some error occurs while matlab is running
"""
def __init__(self, line, err):
self.line = line
self.err = err
def __unicode__(self):
s = "Failed to parse and evaluate line %r.\n Matlab error message: %r"%\
(self.line, self.err)
return s
if PY3:
__str__ = __unicode__
else:
def __str__(self):
return unicode_to_str(text_type(self), 'utf-8')
@magics_class
class MatlabMagics(Magics):
"""
A set of magics for interactive work with Matlab(R).
"""
def __init__(self, shell,
matlab='matlab',
pyconverter=np.asarray,
**kwargs):
"""
Parameters
----------
shell : IPython shell
matlab : str
The system call to start a matlab session. Allows you to choose a
particular version of matlab if you want
pyconverter : callable
To be called on matlab variables returning into the ipython
namespace
kwargs: additional key-word arguments to pass to initialization of
the Matlab/Octave process
"""
super(MatlabMagics, self).__init__(shell)
if 'octave' in matlab.lower():
self.Matlab = pymat.Octave(matlab, **kwargs)
else:
self.Matlab = pymat.Matlab(matlab, **kwargs)
self.Matlab.start()
self.pyconverter = pyconverter
def eval(self, line):
"""
Parse and evaluate a single line of matlab
"""
run_dict = self.Matlab.run_code(line)
if not run_dict['success']:
raise MatlabInterperterError(line, run_dict['content']['stdout'])
# This is the matlab stdout:
return run_dict
def set_matlab_var(self, name, value):
"""
Set up a variable in Matlab workspace
"""
run_dict = self.Matlab.set_variable(name, value)
if not run_dict['success']:
raise MatlabInterperterError(line, run_dict['content']['stdout'])
@magic_arguments()
@argument(
'-i', '--input', action='append',
help='Names of input variable from shell.user_ns to be assigned to Matlab variables of the same names after calling self.pyconverter. Multiple names can be passed separated only by commas with no whitespace.'
)
@argument(
'-o', '--output', action='append',
help='Names of variables to be pushed from matlab to shell.user_ns after executing cell body and applying self.Matlab.get_variable(). Multiple names can be passed separated only by commas with no whitespace.'
)
@argument(
'-s', '--silent', action='store_true',
help='Do not display text output of MATLAB command'
)
@argument(
'-S', '--size', action='store', default='512,384',
help='Pixel size of plots, "width,height.'
)
@argument(
'-g', '--gui', action='store_true',
help='Show plots in a graphical user interface'
)
@argument(
'code',
nargs='*',
)
@needs_local_scope
@line_cell_magic
def matlab(self, line, cell=None, local_ns=None):
"Execute code in matlab."
args = parse_argstring(self.matlab, line)
code = line if cell is None else ' '.join(args.code) + cell
if local_ns is None:
local_ns = {}
width, height = args.size.split(',')
self.Matlab.set_plot_settings(width, height, not args.gui)
if args.input:
for input in ','.join(args.input).split(','):
try:
val = local_ns[input]
except KeyError:
val = self.shell.user_ns[input]
# The _Session.set_variable function which this calls
# should correctly detect numpy arrays and serialize them
# as json correctly.
self.set_matlab_var(input, val)
try:
result_dict = self.eval(code)
except MatlabInterperterError:
raise
except:
raise RuntimeError('\n'.join([
"There was an error running the code:",
code,
"-----------------------",
"Are you sure Matlab is started?",
]))
text_output = result_dict['content']['stdout']
# Figures get saved by matlab in reverse order...
imgfiles = result_dict['content']['figures'][::-1]
data_dir = result_dict['content']['datadir']
display_data = []
if text_output and not args.silent:
display_data.append(('MatlabMagic.matlab',
{'text/plain': text_output}))
for imgf in imgfiles:
if len(imgf):
# Store the path to the directory so that you can delete it
# later on:
with open(imgf, 'rb') as fid:
image = fid.read()
display_data.append(('MatlabMagic.matlab',
{'image/png': image}))
for disp_d in display_data:
publish_display_data(source=disp_d[0], data=disp_d[1])
# Delete the temporary data files created by matlab:
if len(data_dir):
rmtree(data_dir)
if args.output:
for output in ','.join(args.output).split(','):
self.shell.push({output:self.Matlab.get_variable(output)})
_loaded = False
def unload_ipython_extension(ip):
global _loaded
if _loaded:
magic = ip.magics_manager.registry.pop('MatlabMagics')
magic.Matlab.stop()
_loaded = False
|
# arokem/python-matlab-bridge | tools/gh_api.py | post_gist | python
def post_gist(content, description='', filename='file', auth=False):
    """Post some text to a Gist, and return the URL."""
    gist = {
        "description": description,
        "public": True,
        "files": {filename: {"content": content}},
    }
    post_data = json.dumps(gist).encode('utf-8')
    # Authentication is optional; anonymous gists need no header.
    headers = make_auth_header() if auth else {}
    response = requests.post("https://api.github.com/gists",
                             data=post_data, headers=headers)
    response.raise_for_status()
    return json.loads(response.text)['html_url']
"def make_auth_header():\n return {'Authorization': 'token ' + get_auth_token()}\n"
] | """Functions for Github API requests."""
from __future__ import print_function
try:
input = raw_input
except NameError:
pass
import os
import re
import sys
import requests
import getpass
import json
try:
import requests_cache
except ImportError:
print("no cache", file=sys.stderr)
else:
requests_cache.install_cache("gh_api", expire_after=3600)
# Keyring stores passwords by a 'username', but we're not storing a username and
# password
fake_username = 'ipython_tools'
class Obj(dict):
"""Dictionary with attribute access to names."""
def __getattr__(self, name):
try:
return self[name]
except KeyError:
raise AttributeError(name)
def __setattr__(self, name, val):
self[name] = val
token = None
def get_auth_token():
global token
if token is not None:
return token
import keyring
token = keyring.get_password('github', fake_username)
if token is not None:
return token
print("Please enter your github username and password. These are not "
"stored, only used to get an oAuth token. You can revoke this at "
"any time on Github.")
user = input("Username: ")
pw = getpass.getpass("Password: ")
auth_request = {
"scopes": [
"public_repo",
"gist"
],
"note": "IPython tools",
"note_url": "https://github.com/ipython/ipython/tree/master/tools",
}
response = requests.post('https://api.github.com/authorizations',
auth=(user, pw), data=json.dumps(auth_request))
response.raise_for_status()
token = json.loads(response.text)['token']
keyring.set_password('github', fake_username, token)
return token
def make_auth_header():
return {'Authorization': 'token ' + get_auth_token()}
def post_issue_comment(project, num, body):
url = 'https://api.github.com/repos/{project}/issues/{num}/comments'.format(project=project, num=num)
payload = json.dumps({'body': body})
requests.post(url, data=payload, headers=make_auth_header())
def get_pull_request(project, num, auth=False):
"""get pull request info by number
"""
url = "https://api.github.com/repos/{project}/pulls/{num}".format(project=project, num=num)
if auth:
header = make_auth_header()
else:
header = None
response = requests.get(url, headers=header)
response.raise_for_status()
return json.loads(response.text, object_hook=Obj)
def get_pull_request_files(project, num, auth=False):
"""get list of files in a pull request"""
url = "https://api.github.com/repos/{project}/pulls/{num}/files".format(project=project, num=num)
if auth:
header = make_auth_header()
else:
header = None
return get_paged_request(url, headers=header)
element_pat = re.compile(r'<(.+?)>')
rel_pat = re.compile(r'rel=[\'"](\w+)[\'"]')
def get_paged_request(url, headers=None, **params):
"""get a full list, handling APIv3's paging"""
results = []
params.setdefault("per_page", 100)
while True:
if '?' in url:
params = None
print("fetching %s" % url, file=sys.stderr)
else:
print("fetching %s with %s" % (url, params), file=sys.stderr)
response = requests.get(url, headers=headers, params=params)
response.raise_for_status()
results.extend(response.json())
if 'next' in response.links:
url = response.links['next']['url']
else:
break
return results
def get_pulls_list(project, auth=False, **params):
"""get pull request list"""
params.setdefault("state", "closed")
url = "https://api.github.com/repos/{project}/pulls".format(project=project)
if auth:
headers = make_auth_header()
else:
headers = None
pages = get_paged_request(url, headers=headers, **params)
return pages
def get_issues_list(project, auth=False, **params):
"""get issues list"""
params.setdefault("state", "closed")
url = "https://api.github.com/repos/{project}/issues".format(project=project)
if auth:
headers = make_auth_header()
else:
headers = None
pages = get_paged_request(url, headers=headers, **params)
return pages
def get_milestones(project, auth=False, **params):
url = "https://api.github.com/repos/{project}/milestones".format(project=project)
if auth:
headers = make_auth_header()
else:
headers = None
milestones = get_paged_request(url, headers=headers, **params)
return milestones
def get_milestone_id(project, milestone, auth=False, **params):
milestones = get_milestones(project, auth=auth, **params)
for mstone in milestones:
if mstone['title'] == milestone:
return mstone['number']
else:
raise ValueError("milestone %s not found" % milestone)
def is_pull_request(issue):
"""Return True if the given issue is a pull request."""
return bool(issue.get('pull_request', {}).get('html_url', None))
def get_authors(pr):
print("getting authors for #%i" % pr['number'], file=sys.stderr)
h = make_auth_header()
r = requests.get(pr['commits_url'], headers=h)
r.raise_for_status()
commits = r.json()
authors = []
for commit in commits:
author = commit['commit']['author']
authors.append("%s <%s>" % (author['name'], author['email']))
return authors
# encode_multipart_formdata is from urllib3.filepost
# The only change is to iter_fields, to enforce S3's required key ordering
def iter_fields(fields):
fields = fields.copy()
for key in ('key', 'acl', 'Filename', 'success_action_status', 'AWSAccessKeyId',
'Policy', 'Signature', 'Content-Type', 'file'):
yield (key, fields.pop(key))
for (k,v) in fields.items():
yield k,v
def encode_multipart_formdata(fields, boundary=None):
"""
Encode a dictionary of ``fields`` using the multipart/form-data mime format.
:param fields:
Dictionary of fields or list of (key, value) field tuples. The key is
treated as the field name, and the value as the body of the form-data
bytes. If the value is a tuple of two elements, then the first element
is treated as the filename of the form-data section.
Field names and filenames must be unicode.
:param boundary:
If not specified, then a random boundary will be generated using
:func:`mimetools.choose_boundary`.
"""
# copy requests imports in here:
from io import BytesIO
from requests.packages.urllib3.filepost import (
choose_boundary, six, writer, b, get_content_type
)
body = BytesIO()
if boundary is None:
boundary = choose_boundary()
for fieldname, value in iter_fields(fields):
body.write(b('--%s\r\n' % (boundary)))
if isinstance(value, tuple):
filename, data = value
writer(body).write('Content-Disposition: form-data; name="%s"; '
'filename="%s"\r\n' % (fieldname, filename))
body.write(b('Content-Type: %s\r\n\r\n' %
(get_content_type(filename))))
else:
data = value
writer(body).write('Content-Disposition: form-data; name="%s"\r\n'
% (fieldname))
body.write(b'Content-Type: text/plain\r\n\r\n')
if isinstance(data, int):
data = str(data) # Backwards compatibility
if isinstance(data, six.text_type):
writer(body).write(data)
else:
body.write(data)
body.write(b'\r\n')
body.write(b('--%s--\r\n' % (boundary)))
content_type = b('multipart/form-data; boundary=%s' % boundary)
return body.getvalue(), content_type
def post_download(project, filename, name=None, description=""):
"""Upload a file to the GitHub downloads area"""
if name is None:
name = os.path.basename(filename)
with open(filename, 'rb') as f:
filedata = f.read()
url = "https://api.github.com/repos/{project}/downloads".format(project=project)
payload = json.dumps(dict(name=name, size=len(filedata),
description=description))
response = requests.post(url, data=payload, headers=make_auth_header())
response.raise_for_status()
reply = json.loads(response.content)
s3_url = reply['s3_url']
fields = dict(
key=reply['path'],
acl=reply['acl'],
success_action_status=201,
Filename=reply['name'],
AWSAccessKeyId=reply['accesskeyid'],
Policy=reply['policy'],
Signature=reply['signature'],
file=(reply['name'], filedata),
)
fields['Content-Type'] = reply['mime_type']
data, content_type = encode_multipart_formdata(fields)
s3r = requests.post(s3_url, data=data, headers={'Content-Type': content_type})
return s3r
|
# arokem/python-matlab-bridge | tools/gh_api.py | get_pull_request | python
def get_pull_request(project, num, auth=False):
    """get pull request info by number
    """
    url = "https://api.github.com/repos/{project}/pulls/{num}".format(
        project=project, num=num)
    # Auth header only when requested; None lets requests omit it.
    header = make_auth_header() if auth else None
    response = requests.get(url, headers=header)
    response.raise_for_status()
    # Obj gives attribute-style access to the decoded JSON dicts.
    return json.loads(response.text, object_hook=Obj)
"def make_auth_header():\n return {'Authorization': 'token ' + get_auth_token()}\n"
] | """Functions for Github API requests."""
from __future__ import print_function
try:
input = raw_input
except NameError:
pass
import os
import re
import sys
import requests
import getpass
import json
try:
import requests_cache
except ImportError:
print("no cache", file=sys.stderr)
else:
requests_cache.install_cache("gh_api", expire_after=3600)
# Keyring stores passwords by a 'username', but we're not storing a username and
# password
fake_username = 'ipython_tools'
class Obj(dict):
"""Dictionary with attribute access to names."""
def __getattr__(self, name):
try:
return self[name]
except KeyError:
raise AttributeError(name)
def __setattr__(self, name, val):
self[name] = val
token = None
def get_auth_token():
global token
if token is not None:
return token
import keyring
token = keyring.get_password('github', fake_username)
if token is not None:
return token
print("Please enter your github username and password. These are not "
"stored, only used to get an oAuth token. You can revoke this at "
"any time on Github.")
user = input("Username: ")
pw = getpass.getpass("Password: ")
auth_request = {
"scopes": [
"public_repo",
"gist"
],
"note": "IPython tools",
"note_url": "https://github.com/ipython/ipython/tree/master/tools",
}
response = requests.post('https://api.github.com/authorizations',
auth=(user, pw), data=json.dumps(auth_request))
response.raise_for_status()
token = json.loads(response.text)['token']
keyring.set_password('github', fake_username, token)
return token
def make_auth_header():
return {'Authorization': 'token ' + get_auth_token()}
def post_issue_comment(project, num, body):
url = 'https://api.github.com/repos/{project}/issues/{num}/comments'.format(project=project, num=num)
payload = json.dumps({'body': body})
requests.post(url, data=payload, headers=make_auth_header())
def post_gist(content, description='', filename='file', auth=False):
"""Post some text to a Gist, and return the URL."""
post_data = json.dumps({
"description": description,
"public": True,
"files": {
filename: {
"content": content
}
}
}).encode('utf-8')
headers = make_auth_header() if auth else {}
response = requests.post("https://api.github.com/gists", data=post_data, headers=headers)
response.raise_for_status()
response_data = json.loads(response.text)
return response_data['html_url']
def get_pull_request_files(project, num, auth=False):
"""get list of files in a pull request"""
url = "https://api.github.com/repos/{project}/pulls/{num}/files".format(project=project, num=num)
if auth:
header = make_auth_header()
else:
header = None
return get_paged_request(url, headers=header)
element_pat = re.compile(r'<(.+?)>')
rel_pat = re.compile(r'rel=[\'"](\w+)[\'"]')
def get_paged_request(url, headers=None, **params):
"""get a full list, handling APIv3's paging"""
results = []
params.setdefault("per_page", 100)
while True:
if '?' in url:
params = None
print("fetching %s" % url, file=sys.stderr)
else:
print("fetching %s with %s" % (url, params), file=sys.stderr)
response = requests.get(url, headers=headers, params=params)
response.raise_for_status()
results.extend(response.json())
if 'next' in response.links:
url = response.links['next']['url']
else:
break
return results
def get_pulls_list(project, auth=False, **params):
"""get pull request list"""
params.setdefault("state", "closed")
url = "https://api.github.com/repos/{project}/pulls".format(project=project)
if auth:
headers = make_auth_header()
else:
headers = None
pages = get_paged_request(url, headers=headers, **params)
return pages
def get_issues_list(project, auth=False, **params):
"""get issues list"""
params.setdefault("state", "closed")
url = "https://api.github.com/repos/{project}/issues".format(project=project)
if auth:
headers = make_auth_header()
else:
headers = None
pages = get_paged_request(url, headers=headers, **params)
return pages
def get_milestones(project, auth=False, **params):
url = "https://api.github.com/repos/{project}/milestones".format(project=project)
if auth:
headers = make_auth_header()
else:
headers = None
milestones = get_paged_request(url, headers=headers, **params)
return milestones
def get_milestone_id(project, milestone, auth=False, **params):
milestones = get_milestones(project, auth=auth, **params)
for mstone in milestones:
if mstone['title'] == milestone:
return mstone['number']
else:
raise ValueError("milestone %s not found" % milestone)
def is_pull_request(issue):
"""Return True if the given issue is a pull request."""
return bool(issue.get('pull_request', {}).get('html_url', None))
def get_authors(pr):
print("getting authors for #%i" % pr['number'], file=sys.stderr)
h = make_auth_header()
r = requests.get(pr['commits_url'], headers=h)
r.raise_for_status()
commits = r.json()
authors = []
for commit in commits:
author = commit['commit']['author']
authors.append("%s <%s>" % (author['name'], author['email']))
return authors
# encode_multipart_formdata is from urllib3.filepost
# The only change is to iter_fields, to enforce S3's required key ordering
def iter_fields(fields):
fields = fields.copy()
for key in ('key', 'acl', 'Filename', 'success_action_status', 'AWSAccessKeyId',
'Policy', 'Signature', 'Content-Type', 'file'):
yield (key, fields.pop(key))
for (k,v) in fields.items():
yield k,v
def encode_multipart_formdata(fields, boundary=None):
"""
Encode a dictionary of ``fields`` using the multipart/form-data mime format.
:param fields:
Dictionary of fields or list of (key, value) field tuples. The key is
treated as the field name, and the value as the body of the form-data
bytes. If the value is a tuple of two elements, then the first element
is treated as the filename of the form-data section.
Field names and filenames must be unicode.
:param boundary:
If not specified, then a random boundary will be generated using
:func:`mimetools.choose_boundary`.
"""
# copy requests imports in here:
from io import BytesIO
from requests.packages.urllib3.filepost import (
choose_boundary, six, writer, b, get_content_type
)
body = BytesIO()
if boundary is None:
boundary = choose_boundary()
for fieldname, value in iter_fields(fields):
body.write(b('--%s\r\n' % (boundary)))
if isinstance(value, tuple):
filename, data = value
writer(body).write('Content-Disposition: form-data; name="%s"; '
'filename="%s"\r\n' % (fieldname, filename))
body.write(b('Content-Type: %s\r\n\r\n' %
(get_content_type(filename))))
else:
data = value
writer(body).write('Content-Disposition: form-data; name="%s"\r\n'
% (fieldname))
body.write(b'Content-Type: text/plain\r\n\r\n')
if isinstance(data, int):
data = str(data) # Backwards compatibility
if isinstance(data, six.text_type):
writer(body).write(data)
else:
body.write(data)
body.write(b'\r\n')
body.write(b('--%s--\r\n' % (boundary)))
content_type = b('multipart/form-data; boundary=%s' % boundary)
return body.getvalue(), content_type
def post_download(project, filename, name=None, description=""):
"""Upload a file to the GitHub downloads area"""
if name is None:
name = os.path.basename(filename)
with open(filename, 'rb') as f:
filedata = f.read()
url = "https://api.github.com/repos/{project}/downloads".format(project=project)
payload = json.dumps(dict(name=name, size=len(filedata),
description=description))
response = requests.post(url, data=payload, headers=make_auth_header())
response.raise_for_status()
reply = json.loads(response.content)
s3_url = reply['s3_url']
fields = dict(
key=reply['path'],
acl=reply['acl'],
success_action_status=201,
Filename=reply['name'],
AWSAccessKeyId=reply['accesskeyid'],
Policy=reply['policy'],
Signature=reply['signature'],
file=(reply['name'], filedata),
)
fields['Content-Type'] = reply['mime_type']
data, content_type = encode_multipart_formdata(fields)
s3r = requests.post(s3_url, data=data, headers={'Content-Type': content_type})
return s3r
|
arokem/python-matlab-bridge | tools/gh_api.py | get_pull_request_files | python | def get_pull_request_files(project, num, auth=False):
url = "https://api.github.com/repos/{project}/pulls/{num}/files".format(project=project, num=num)
if auth:
header = make_auth_header()
else:
header = None
return get_paged_request(url, headers=header) | get list of files in a pull request | train | https://github.com/arokem/python-matlab-bridge/blob/9822c7b55435662f4f033c5479cc03fea2255755/tools/gh_api.py#L110-L117 | [
"def make_auth_header():\n return {'Authorization': 'token ' + get_auth_token()}\n",
"def get_paged_request(url, headers=None, **params):\n \"\"\"get a full list, handling APIv3's paging\"\"\"\n results = []\n params.setdefault(\"per_page\", 100)\n while True:\n if '?' in url:\n p... | """Functions for Github API requests."""
from __future__ import print_function
try:
input = raw_input
except NameError:
pass
import os
import re
import sys
import requests
import getpass
import json
try:
import requests_cache
except ImportError:
print("no cache", file=sys.stderr)
else:
requests_cache.install_cache("gh_api", expire_after=3600)
# Keyring stores passwords by a 'username', but we're not storing a username and
# password
fake_username = 'ipython_tools'
class Obj(dict):
    """A dict whose keys can also be read and written as attributes."""
    def __getattr__(self, name):
        # Attribute reads fall through to key lookup; a missing key must
        # surface as AttributeError so hasattr()/getattr() behave normally.
        if name in self:
            return self[name]
        raise AttributeError(name)
    def __setattr__(self, name, val):
        # Attribute writes are stored as dictionary items.
        self[name] = val
# Module-level cache for the oAuth token; filled in by get_auth_token().
token = None
def get_auth_token():
    """Return a GitHub oAuth token, creating and caching one if necessary.

    Lookup order: the module-level ``token`` cache, then the system
    keyring, and finally an interactive username/password prompt that
    creates a new token via the GitHub authorizations API.  A freshly
    created token is saved back to the keyring.
    """
    global token
    # Fast path: already resolved during this run.
    if token is not None:
        return token
    # Imported lazily so the module can be used without keyring installed,
    # as long as no authenticated call is made.
    import keyring
    token = keyring.get_password('github', fake_username)
    if token is not None:
        return token
    print("Please enter your github username and password. These are not "
          "stored, only used to get an oAuth token. You can revoke this at "
          "any time on Github.")
    user = input("Username: ")
    pw = getpass.getpass("Password: ")
    auth_request = {
      "scopes": [
        "public_repo",
        "gist"
      ],
      "note": "IPython tools",
      "note_url": "https://github.com/ipython/ipython/tree/master/tools",
    }
    # Exchange the basic-auth credentials for an oAuth token.
    response = requests.post('https://api.github.com/authorizations',
                             auth=(user, pw), data=json.dumps(auth_request))
    response.raise_for_status()
    token = json.loads(response.text)['token']
    # Persist for future runs under the fixed fake username.
    keyring.set_password('github', fake_username, token)
    return token
def make_auth_header():
    """Build the Authorization header dict for token-authenticated requests."""
    auth_token = get_auth_token()
    return {'Authorization': 'token %s' % auth_token}
def post_issue_comment(project, num, body):
    """Post *body* as a comment on issue (or PR) *num* of *project*.

    Parameters
    ----------
    project : str
        '<owner>/<repo>' slug.
    num : int
        Issue or pull-request number.
    body : str
        Text of the comment.

    Returns
    -------
    requests.Response
        The POST response, so callers may inspect it.

    Raises
    ------
    requests.HTTPError
        If GitHub rejects the request.  The previous version discarded the
        response, so failures were silent; every other write helper in this
        module calls raise_for_status(), and this now does the same.
    """
    url = 'https://api.github.com/repos/{project}/issues/{num}/comments'.format(project=project, num=num)
    payload = json.dumps({'body': body})
    response = requests.post(url, data=payload, headers=make_auth_header())
    response.raise_for_status()
    return response
def post_gist(content, description='', filename='file', auth=False):
    """Create a public Gist holding *content* and return its html_url."""
    gist = {
        "description": description,
        "public": True,
        "files": {filename: {"content": content}},
    }
    post_data = json.dumps(gist).encode('utf-8')
    if auth:
        headers = make_auth_header()
    else:
        headers = {}
    response = requests.post("https://api.github.com/gists", data=post_data, headers=headers)
    response.raise_for_status()
    return json.loads(response.text)['html_url']
def get_pull_request(project, num, auth=False):
    """Fetch a single pull request by number as an attribute-accessible Obj."""
    url = "https://api.github.com/repos/{project}/pulls/{num}".format(project=project, num=num)
    header = make_auth_header() if auth else None
    response = requests.get(url, headers=header)
    response.raise_for_status()
    # object_hook=Obj turns every JSON object into an Obj (a dict with
    # attribute access), recursively.
    return json.loads(response.text, object_hook=Obj)
# Pre-compiled patterns for parsing RFC 5988 Link headers
# (e.g. '<https://...>; rel="next"').
# NOTE(review): they appear unused in this chunk now that pagination
# relies on requests' response.links -- confirm before removing.
element_pat = re.compile(r'<(.+?)>')
rel_pat = re.compile(r'rel=[\'"](\w+)[\'"]')
def get_paged_request(url, headers=None, **params):
    """Fetch every page of a GitHub APIv3 list endpoint into one list.

    Follows the 'next' links from the Link response header until they run
    out and returns the concatenation of all pages.  Extra keyword
    arguments become query parameters; ``per_page`` defaults to 100.
    """
    results = []
    params.setdefault("per_page", 100)
    while True:
        # 'next' URLs taken from response.links already embed the query
        # string, so once the url contains '?' we must stop passing params,
        # or requests would append them a second time.
        if '?' in url:
            params = None
            print("fetching %s" % url, file=sys.stderr)
        else:
            print("fetching %s with %s" % (url, params), file=sys.stderr)
        response = requests.get(url, headers=headers, params=params)
        response.raise_for_status()
        results.extend(response.json())
        if 'next' in response.links:
            url = response.links['next']['url']
        else:
            break
    return results
def get_pulls_list(project, auth=False, **params):
    """Return all pull requests for *project* (closed ones by default)."""
    params.setdefault("state", "closed")
    url = "https://api.github.com/repos/{project}/pulls".format(project=project)
    headers = make_auth_header() if auth else None
    return get_paged_request(url, headers=headers, **params)
def get_issues_list(project, auth=False, **params):
    """Return all issues for *project* (closed ones by default)."""
    params.setdefault("state", "closed")
    url = "https://api.github.com/repos/{project}/issues".format(project=project)
    headers = make_auth_header() if auth else None
    return get_paged_request(url, headers=headers, **params)
def get_milestones(project, auth=False, **params):
    """Return every milestone defined for *project*."""
    url = "https://api.github.com/repos/{project}/milestones".format(project=project)
    headers = make_auth_header() if auth else None
    return get_paged_request(url, headers=headers, **params)
def get_milestone_id(project, milestone, auth=False, **params):
    """Look up the numeric id of *milestone* (matched by title) in *project*.

    Raises ValueError if no milestone with that title exists.
    """
    for candidate in get_milestones(project, auth=auth, **params):
        if candidate['title'] == milestone:
            return candidate['number']
    raise ValueError("milestone %s not found" % milestone)
def is_pull_request(issue):
    """Return True if the given issue dict represents a pull request."""
    pr_info = issue.get('pull_request', {})
    return bool(pr_info.get('html_url', None))
def get_authors(pr):
    """Return 'Name <email>' strings for each commit author in a pull request.

    *pr* must provide 'number' (used only for the progress message) and
    'commits_url'.  Requires authentication; one author string is emitted
    per commit, in commit order.
    """
    print("getting authors for #%i" % pr['number'], file=sys.stderr)
    h = make_auth_header()
    r = requests.get(pr['commits_url'], headers=h)
    r.raise_for_status()
    commits = r.json()
    authors = []
    for commit in commits:
        # Use the git-level author info embedded in each commit object.
        author = commit['commit']['author']
        authors.append("%s <%s>" % (author['name'], author['email']))
    return authors
# encode_multipart_formdata is from urllib3.filepost
# The only change is to iter_fields, to enforce S3's required key ordering
def iter_fields(fields):
    """Yield (name, value) pairs with S3's required keys emitted first.

    The nine keys S3 cares about come out in a fixed order; any remaining
    fields follow.  The input mapping is copied, not modified; a KeyError
    is raised if any required key is missing.
    """
    remaining = fields.copy()
    s3_order = ('key', 'acl', 'Filename', 'success_action_status', 'AWSAccessKeyId',
                'Policy', 'Signature', 'Content-Type', 'file')
    for name in s3_order:
        yield name, remaining.pop(name)
    for pair in remaining.items():
        yield pair
def encode_multipart_formdata(fields, boundary=None):
    """
    Encode a dictionary of ``fields`` using the multipart/form-data mime format.
    :param fields:
        Dictionary of fields or list of (key, value) field tuples. The key is
        treated as the field name, and the value as the body of the form-data
        bytes. If the value is a tuple of two elements, then the first element
        is treated as the filename of the form-data section.
        Field names and filenames must be unicode.
    :param boundary:
        If not specified, then a random boundary will be generated using
        :func:`mimetools.choose_boundary`.
    """
    # copy requests imports in here:
    # NOTE(review): requests.packages.urllib3.filepost is a vendored module;
    # newer requests releases may not expose these helpers -- confirm the
    # pinned requests version before relying on this import.
    from io import BytesIO
    from requests.packages.urllib3.filepost import (
        choose_boundary, six, writer, b, get_content_type
    )
    body = BytesIO()
    if boundary is None:
        boundary = choose_boundary()
    # iter_fields enforces the key ordering S3 requires for its POST policy.
    for fieldname, value in iter_fields(fields):
        body.write(b('--%s\r\n' % (boundary)))
        if isinstance(value, tuple):
            # (filename, data) pair: emit a file part with a guessed MIME type.
            filename, data = value
            writer(body).write('Content-Disposition: form-data; name="%s"; '
                               'filename="%s"\r\n' % (fieldname, filename))
            body.write(b('Content-Type: %s\r\n\r\n' %
                         (get_content_type(filename))))
        else:
            # Plain value: emit a text/plain part.
            data = value
            writer(body).write('Content-Disposition: form-data; name="%s"\r\n'
                               % (fieldname))
            body.write(b'Content-Type: text/plain\r\n\r\n')
        if isinstance(data, int):
            data = str(data) # Backwards compatibility
        if isinstance(data, six.text_type):
            # Text payloads go through the codec writer; raw bytes below.
            writer(body).write(data)
        else:
            body.write(data)
        body.write(b'\r\n')
    body.write(b('--%s--\r\n' % (boundary)))
    content_type = b('multipart/form-data; boundary=%s' % boundary)
    # Returns (encoded_body_bytes, content_type_header_value).
    return body.getvalue(), content_type
def post_download(project, filename, name=None, description=""):
    """Upload a file to the GitHub downloads area.

    Two-step upload: first registers the download with the GitHub API,
    then POSTs the file data to the S3 URL GitHub hands back.  Returns
    the S3 response object.

    NOTE(review): this targets the old GitHub Downloads API, which GitHub
    has deprecated -- confirm it still works before relying on it.
    """
    if name is None:
        name = os.path.basename(filename)
    with open(filename, 'rb') as f:
        filedata = f.read()
    url = "https://api.github.com/repos/{project}/downloads".format(project=project)
    payload = json.dumps(dict(name=name, size=len(filedata),
                              description=description))
    response = requests.post(url, data=payload, headers=make_auth_header())
    response.raise_for_status()
    reply = json.loads(response.content)
    s3_url = reply['s3_url']
    # Field order matters to S3; iter_fields() inside
    # encode_multipart_formdata emits these keys in the required sequence.
    fields = dict(
        key=reply['path'],
        acl=reply['acl'],
        success_action_status=201,
        Filename=reply['name'],
        AWSAccessKeyId=reply['accesskeyid'],
        Policy=reply['policy'],
        Signature=reply['signature'],
        file=(reply['name'], filedata),
    )
    fields['Content-Type'] = reply['mime_type']
    data, content_type = encode_multipart_formdata(fields)
    s3r = requests.post(s3_url, data=data, headers={'Content-Type': content_type})
    return s3r
|
arokem/python-matlab-bridge | tools/gh_api.py | get_paged_request | python | def get_paged_request(url, headers=None, **params):
results = []
params.setdefault("per_page", 100)
while True:
if '?' in url:
params = None
print("fetching %s" % url, file=sys.stderr)
else:
print("fetching %s with %s" % (url, params), file=sys.stderr)
response = requests.get(url, headers=headers, params=params)
response.raise_for_status()
results.extend(response.json())
if 'next' in response.links:
url = response.links['next']['url']
else:
break
return results | get a full list, handling APIv3's paging | train | https://github.com/arokem/python-matlab-bridge/blob/9822c7b55435662f4f033c5479cc03fea2255755/tools/gh_api.py#L122-L139 | null | """Functions for Github API requests."""
from __future__ import print_function
try:
input = raw_input
except NameError:
pass
import os
import re
import sys
import requests
import getpass
import json
try:
import requests_cache
except ImportError:
print("no cache", file=sys.stderr)
else:
requests_cache.install_cache("gh_api", expire_after=3600)
# Keyring stores passwords by a 'username', but we're not storing a username and
# password
fake_username = 'ipython_tools'
class Obj(dict):
"""Dictionary with attribute access to names."""
def __getattr__(self, name):
try:
return self[name]
except KeyError:
raise AttributeError(name)
def __setattr__(self, name, val):
self[name] = val
token = None
def get_auth_token():
global token
if token is not None:
return token
import keyring
token = keyring.get_password('github', fake_username)
if token is not None:
return token
print("Please enter your github username and password. These are not "
"stored, only used to get an oAuth token. You can revoke this at "
"any time on Github.")
user = input("Username: ")
pw = getpass.getpass("Password: ")
auth_request = {
"scopes": [
"public_repo",
"gist"
],
"note": "IPython tools",
"note_url": "https://github.com/ipython/ipython/tree/master/tools",
}
response = requests.post('https://api.github.com/authorizations',
auth=(user, pw), data=json.dumps(auth_request))
response.raise_for_status()
token = json.loads(response.text)['token']
keyring.set_password('github', fake_username, token)
return token
def make_auth_header():
return {'Authorization': 'token ' + get_auth_token()}
def post_issue_comment(project, num, body):
url = 'https://api.github.com/repos/{project}/issues/{num}/comments'.format(project=project, num=num)
payload = json.dumps({'body': body})
requests.post(url, data=payload, headers=make_auth_header())
def post_gist(content, description='', filename='file', auth=False):
"""Post some text to a Gist, and return the URL."""
post_data = json.dumps({
"description": description,
"public": True,
"files": {
filename: {
"content": content
}
}
}).encode('utf-8')
headers = make_auth_header() if auth else {}
response = requests.post("https://api.github.com/gists", data=post_data, headers=headers)
response.raise_for_status()
response_data = json.loads(response.text)
return response_data['html_url']
def get_pull_request(project, num, auth=False):
"""get pull request info by number
"""
url = "https://api.github.com/repos/{project}/pulls/{num}".format(project=project, num=num)
if auth:
header = make_auth_header()
else:
header = None
response = requests.get(url, headers=header)
response.raise_for_status()
return json.loads(response.text, object_hook=Obj)
def get_pull_request_files(project, num, auth=False):
    """Return the list of files touched by pull request *num* of *project*."""
    url = "https://api.github.com/repos/{project}/pulls/{num}/files".format(project=project, num=num)
    header = make_auth_header() if auth else None
    return get_paged_request(url, headers=header)
element_pat = re.compile(r'<(.+?)>')
rel_pat = re.compile(r'rel=[\'"](\w+)[\'"]')
def get_pulls_list(project, auth=False, **params):
"""get pull request list"""
params.setdefault("state", "closed")
url = "https://api.github.com/repos/{project}/pulls".format(project=project)
if auth:
headers = make_auth_header()
else:
headers = None
pages = get_paged_request(url, headers=headers, **params)
return pages
def get_issues_list(project, auth=False, **params):
"""get issues list"""
params.setdefault("state", "closed")
url = "https://api.github.com/repos/{project}/issues".format(project=project)
if auth:
headers = make_auth_header()
else:
headers = None
pages = get_paged_request(url, headers=headers, **params)
return pages
def get_milestones(project, auth=False, **params):
url = "https://api.github.com/repos/{project}/milestones".format(project=project)
if auth:
headers = make_auth_header()
else:
headers = None
milestones = get_paged_request(url, headers=headers, **params)
return milestones
def get_milestone_id(project, milestone, auth=False, **params):
milestones = get_milestones(project, auth=auth, **params)
for mstone in milestones:
if mstone['title'] == milestone:
return mstone['number']
else:
raise ValueError("milestone %s not found" % milestone)
def is_pull_request(issue):
"""Return True if the given issue is a pull request."""
return bool(issue.get('pull_request', {}).get('html_url', None))
def get_authors(pr):
print("getting authors for #%i" % pr['number'], file=sys.stderr)
h = make_auth_header()
r = requests.get(pr['commits_url'], headers=h)
r.raise_for_status()
commits = r.json()
authors = []
for commit in commits:
author = commit['commit']['author']
authors.append("%s <%s>" % (author['name'], author['email']))
return authors
# encode_multipart_formdata is from urllib3.filepost
# The only change is to iter_fields, to enforce S3's required key ordering
def iter_fields(fields):
fields = fields.copy()
for key in ('key', 'acl', 'Filename', 'success_action_status', 'AWSAccessKeyId',
'Policy', 'Signature', 'Content-Type', 'file'):
yield (key, fields.pop(key))
for (k,v) in fields.items():
yield k,v
def encode_multipart_formdata(fields, boundary=None):
"""
Encode a dictionary of ``fields`` using the multipart/form-data mime format.
:param fields:
Dictionary of fields or list of (key, value) field tuples. The key is
treated as the field name, and the value as the body of the form-data
bytes. If the value is a tuple of two elements, then the first element
is treated as the filename of the form-data section.
Field names and filenames must be unicode.
:param boundary:
If not specified, then a random boundary will be generated using
:func:`mimetools.choose_boundary`.
"""
# copy requests imports in here:
from io import BytesIO
from requests.packages.urllib3.filepost import (
choose_boundary, six, writer, b, get_content_type
)
body = BytesIO()
if boundary is None:
boundary = choose_boundary()
for fieldname, value in iter_fields(fields):
body.write(b('--%s\r\n' % (boundary)))
if isinstance(value, tuple):
filename, data = value
writer(body).write('Content-Disposition: form-data; name="%s"; '
'filename="%s"\r\n' % (fieldname, filename))
body.write(b('Content-Type: %s\r\n\r\n' %
(get_content_type(filename))))
else:
data = value
writer(body).write('Content-Disposition: form-data; name="%s"\r\n'
% (fieldname))
body.write(b'Content-Type: text/plain\r\n\r\n')
if isinstance(data, int):
data = str(data) # Backwards compatibility
if isinstance(data, six.text_type):
writer(body).write(data)
else:
body.write(data)
body.write(b'\r\n')
body.write(b('--%s--\r\n' % (boundary)))
content_type = b('multipart/form-data; boundary=%s' % boundary)
return body.getvalue(), content_type
def post_download(project, filename, name=None, description=""):
"""Upload a file to the GitHub downloads area"""
if name is None:
name = os.path.basename(filename)
with open(filename, 'rb') as f:
filedata = f.read()
url = "https://api.github.com/repos/{project}/downloads".format(project=project)
payload = json.dumps(dict(name=name, size=len(filedata),
description=description))
response = requests.post(url, data=payload, headers=make_auth_header())
response.raise_for_status()
reply = json.loads(response.content)
s3_url = reply['s3_url']
fields = dict(
key=reply['path'],
acl=reply['acl'],
success_action_status=201,
Filename=reply['name'],
AWSAccessKeyId=reply['accesskeyid'],
Policy=reply['policy'],
Signature=reply['signature'],
file=(reply['name'], filedata),
)
fields['Content-Type'] = reply['mime_type']
data, content_type = encode_multipart_formdata(fields)
s3r = requests.post(s3_url, data=data, headers={'Content-Type': content_type})
return s3r
|
arokem/python-matlab-bridge | tools/gh_api.py | get_pulls_list | python | def get_pulls_list(project, auth=False, **params):
params.setdefault("state", "closed")
url = "https://api.github.com/repos/{project}/pulls".format(project=project)
if auth:
headers = make_auth_header()
else:
headers = None
pages = get_paged_request(url, headers=headers, **params)
return pages | get pull request list | train | https://github.com/arokem/python-matlab-bridge/blob/9822c7b55435662f4f033c5479cc03fea2255755/tools/gh_api.py#L141-L150 | [
"def make_auth_header():\n return {'Authorization': 'token ' + get_auth_token()}\n",
"def get_paged_request(url, headers=None, **params):\n \"\"\"get a full list, handling APIv3's paging\"\"\"\n results = []\n params.setdefault(\"per_page\", 100)\n while True:\n if '?' in url:\n p... | """Functions for Github API requests."""
from __future__ import print_function
try:
input = raw_input
except NameError:
pass
import os
import re
import sys
import requests
import getpass
import json
try:
import requests_cache
except ImportError:
print("no cache", file=sys.stderr)
else:
requests_cache.install_cache("gh_api", expire_after=3600)
# Keyring stores passwords by a 'username', but we're not storing a username and
# password
fake_username = 'ipython_tools'
class Obj(dict):
"""Dictionary with attribute access to names."""
def __getattr__(self, name):
try:
return self[name]
except KeyError:
raise AttributeError(name)
def __setattr__(self, name, val):
self[name] = val
token = None
def get_auth_token():
global token
if token is not None:
return token
import keyring
token = keyring.get_password('github', fake_username)
if token is not None:
return token
print("Please enter your github username and password. These are not "
"stored, only used to get an oAuth token. You can revoke this at "
"any time on Github.")
user = input("Username: ")
pw = getpass.getpass("Password: ")
auth_request = {
"scopes": [
"public_repo",
"gist"
],
"note": "IPython tools",
"note_url": "https://github.com/ipython/ipython/tree/master/tools",
}
response = requests.post('https://api.github.com/authorizations',
auth=(user, pw), data=json.dumps(auth_request))
response.raise_for_status()
token = json.loads(response.text)['token']
keyring.set_password('github', fake_username, token)
return token
def make_auth_header():
return {'Authorization': 'token ' + get_auth_token()}
def post_issue_comment(project, num, body):
url = 'https://api.github.com/repos/{project}/issues/{num}/comments'.format(project=project, num=num)
payload = json.dumps({'body': body})
requests.post(url, data=payload, headers=make_auth_header())
def post_gist(content, description='', filename='file', auth=False):
"""Post some text to a Gist, and return the URL."""
post_data = json.dumps({
"description": description,
"public": True,
"files": {
filename: {
"content": content
}
}
}).encode('utf-8')
headers = make_auth_header() if auth else {}
response = requests.post("https://api.github.com/gists", data=post_data, headers=headers)
response.raise_for_status()
response_data = json.loads(response.text)
return response_data['html_url']
def get_pull_request(project, num, auth=False):
"""get pull request info by number
"""
url = "https://api.github.com/repos/{project}/pulls/{num}".format(project=project, num=num)
if auth:
header = make_auth_header()
else:
header = None
response = requests.get(url, headers=header)
response.raise_for_status()
return json.loads(response.text, object_hook=Obj)
def get_pull_request_files(project, num, auth=False):
"""get list of files in a pull request"""
url = "https://api.github.com/repos/{project}/pulls/{num}/files".format(project=project, num=num)
if auth:
header = make_auth_header()
else:
header = None
return get_paged_request(url, headers=header)
element_pat = re.compile(r'<(.+?)>')
rel_pat = re.compile(r'rel=[\'"](\w+)[\'"]')
def get_paged_request(url, headers=None, **params):
"""get a full list, handling APIv3's paging"""
results = []
params.setdefault("per_page", 100)
while True:
if '?' in url:
params = None
print("fetching %s" % url, file=sys.stderr)
else:
print("fetching %s with %s" % (url, params), file=sys.stderr)
response = requests.get(url, headers=headers, params=params)
response.raise_for_status()
results.extend(response.json())
if 'next' in response.links:
url = response.links['next']['url']
else:
break
return results
def get_issues_list(project, auth=False, **params):
"""get issues list"""
params.setdefault("state", "closed")
url = "https://api.github.com/repos/{project}/issues".format(project=project)
if auth:
headers = make_auth_header()
else:
headers = None
pages = get_paged_request(url, headers=headers, **params)
return pages
def get_milestones(project, auth=False, **params):
url = "https://api.github.com/repos/{project}/milestones".format(project=project)
if auth:
headers = make_auth_header()
else:
headers = None
milestones = get_paged_request(url, headers=headers, **params)
return milestones
def get_milestone_id(project, milestone, auth=False, **params):
milestones = get_milestones(project, auth=auth, **params)
for mstone in milestones:
if mstone['title'] == milestone:
return mstone['number']
else:
raise ValueError("milestone %s not found" % milestone)
def is_pull_request(issue):
"""Return True if the given issue is a pull request."""
return bool(issue.get('pull_request', {}).get('html_url', None))
def get_authors(pr):
print("getting authors for #%i" % pr['number'], file=sys.stderr)
h = make_auth_header()
r = requests.get(pr['commits_url'], headers=h)
r.raise_for_status()
commits = r.json()
authors = []
for commit in commits:
author = commit['commit']['author']
authors.append("%s <%s>" % (author['name'], author['email']))
return authors
# encode_multipart_formdata is from urllib3.filepost
# The only change is to iter_fields, to enforce S3's required key ordering
def iter_fields(fields):
fields = fields.copy()
for key in ('key', 'acl', 'Filename', 'success_action_status', 'AWSAccessKeyId',
'Policy', 'Signature', 'Content-Type', 'file'):
yield (key, fields.pop(key))
for (k,v) in fields.items():
yield k,v
def encode_multipart_formdata(fields, boundary=None):
"""
Encode a dictionary of ``fields`` using the multipart/form-data mime format.
:param fields:
Dictionary of fields or list of (key, value) field tuples. The key is
treated as the field name, and the value as the body of the form-data
bytes. If the value is a tuple of two elements, then the first element
is treated as the filename of the form-data section.
Field names and filenames must be unicode.
:param boundary:
If not specified, then a random boundary will be generated using
:func:`mimetools.choose_boundary`.
"""
# copy requests imports in here:
from io import BytesIO
from requests.packages.urllib3.filepost import (
choose_boundary, six, writer, b, get_content_type
)
body = BytesIO()
if boundary is None:
boundary = choose_boundary()
for fieldname, value in iter_fields(fields):
body.write(b('--%s\r\n' % (boundary)))
if isinstance(value, tuple):
filename, data = value
writer(body).write('Content-Disposition: form-data; name="%s"; '
'filename="%s"\r\n' % (fieldname, filename))
body.write(b('Content-Type: %s\r\n\r\n' %
(get_content_type(filename))))
else:
data = value
writer(body).write('Content-Disposition: form-data; name="%s"\r\n'
% (fieldname))
body.write(b'Content-Type: text/plain\r\n\r\n')
if isinstance(data, int):
data = str(data) # Backwards compatibility
if isinstance(data, six.text_type):
writer(body).write(data)
else:
body.write(data)
body.write(b'\r\n')
body.write(b('--%s--\r\n' % (boundary)))
content_type = b('multipart/form-data; boundary=%s' % boundary)
return body.getvalue(), content_type
def post_download(project, filename, name=None, description=""):
"""Upload a file to the GitHub downloads area"""
if name is None:
name = os.path.basename(filename)
with open(filename, 'rb') as f:
filedata = f.read()
url = "https://api.github.com/repos/{project}/downloads".format(project=project)
payload = json.dumps(dict(name=name, size=len(filedata),
description=description))
response = requests.post(url, data=payload, headers=make_auth_header())
response.raise_for_status()
reply = json.loads(response.content)
s3_url = reply['s3_url']
fields = dict(
key=reply['path'],
acl=reply['acl'],
success_action_status=201,
Filename=reply['name'],
AWSAccessKeyId=reply['accesskeyid'],
Policy=reply['policy'],
Signature=reply['signature'],
file=(reply['name'], filedata),
)
fields['Content-Type'] = reply['mime_type']
data, content_type = encode_multipart_formdata(fields)
s3r = requests.post(s3_url, data=data, headers={'Content-Type': content_type})
return s3r
|
arokem/python-matlab-bridge | tools/gh_api.py | encode_multipart_formdata | python | def encode_multipart_formdata(fields, boundary=None):
# copy requests imports in here:
from io import BytesIO
from requests.packages.urllib3.filepost import (
choose_boundary, six, writer, b, get_content_type
)
body = BytesIO()
if boundary is None:
boundary = choose_boundary()
for fieldname, value in iter_fields(fields):
body.write(b('--%s\r\n' % (boundary)))
if isinstance(value, tuple):
filename, data = value
writer(body).write('Content-Disposition: form-data; name="%s"; '
'filename="%s"\r\n' % (fieldname, filename))
body.write(b('Content-Type: %s\r\n\r\n' %
(get_content_type(filename))))
else:
data = value
writer(body).write('Content-Disposition: form-data; name="%s"\r\n'
% (fieldname))
body.write(b'Content-Type: text/plain\r\n\r\n')
if isinstance(data, int):
data = str(data) # Backwards compatibility
if isinstance(data, six.text_type):
writer(body).write(data)
else:
body.write(data)
body.write(b'\r\n')
body.write(b('--%s--\r\n' % (boundary)))
content_type = b('multipart/form-data; boundary=%s' % boundary)
return body.getvalue(), content_type | Encode a dictionary of ``fields`` using the multipart/form-data mime format.
:param fields:
Dictionary of fields or list of (key, value) field tuples. The key is
treated as the field name, and the value as the body of the form-data
bytes. If the value is a tuple of two elements, then the first element
is treated as the filename of the form-data section.
Field names and filenames must be unicode.
:param boundary:
If not specified, then a random boundary will be generated using
:func:`mimetools.choose_boundary`. | train | https://github.com/arokem/python-matlab-bridge/blob/9822c7b55435662f4f033c5479cc03fea2255755/tools/gh_api.py#L207-L260 | [
"def iter_fields(fields):\n fields = fields.copy()\n for key in ('key', 'acl', 'Filename', 'success_action_status', 'AWSAccessKeyId',\n 'Policy', 'Signature', 'Content-Type', 'file'):\n yield (key, fields.pop(key))\n for (k,v) in fields.items():\n yield k,v\n"
] | """Functions for Github API requests."""
from __future__ import print_function
try:
input = raw_input
except NameError:
pass
import os
import re
import sys
import requests
import getpass
import json
try:
import requests_cache
except ImportError:
print("no cache", file=sys.stderr)
else:
requests_cache.install_cache("gh_api", expire_after=3600)
# Keyring stores passwords by a 'username', but we're not storing a username and
# password
fake_username = 'ipython_tools'
class Obj(dict):
"""Dictionary with attribute access to names."""
def __getattr__(self, name):
try:
return self[name]
except KeyError:
raise AttributeError(name)
def __setattr__(self, name, val):
self[name] = val
token = None
def get_auth_token():
global token
if token is not None:
return token
import keyring
token = keyring.get_password('github', fake_username)
if token is not None:
return token
print("Please enter your github username and password. These are not "
"stored, only used to get an oAuth token. You can revoke this at "
"any time on Github.")
user = input("Username: ")
pw = getpass.getpass("Password: ")
auth_request = {
"scopes": [
"public_repo",
"gist"
],
"note": "IPython tools",
"note_url": "https://github.com/ipython/ipython/tree/master/tools",
}
response = requests.post('https://api.github.com/authorizations',
auth=(user, pw), data=json.dumps(auth_request))
response.raise_for_status()
token = json.loads(response.text)['token']
keyring.set_password('github', fake_username, token)
return token
def make_auth_header():
return {'Authorization': 'token ' + get_auth_token()}
def post_issue_comment(project, num, body):
url = 'https://api.github.com/repos/{project}/issues/{num}/comments'.format(project=project, num=num)
payload = json.dumps({'body': body})
requests.post(url, data=payload, headers=make_auth_header())
def post_gist(content, description='', filename='file', auth=False):
"""Post some text to a Gist, and return the URL."""
post_data = json.dumps({
"description": description,
"public": True,
"files": {
filename: {
"content": content
}
}
}).encode('utf-8')
headers = make_auth_header() if auth else {}
response = requests.post("https://api.github.com/gists", data=post_data, headers=headers)
response.raise_for_status()
response_data = json.loads(response.text)
return response_data['html_url']
def get_pull_request(project, num, auth=False):
"""get pull request info by number
"""
url = "https://api.github.com/repos/{project}/pulls/{num}".format(project=project, num=num)
if auth:
header = make_auth_header()
else:
header = None
response = requests.get(url, headers=header)
response.raise_for_status()
return json.loads(response.text, object_hook=Obj)
def get_pull_request_files(project, num, auth=False):
"""get list of files in a pull request"""
url = "https://api.github.com/repos/{project}/pulls/{num}/files".format(project=project, num=num)
if auth:
header = make_auth_header()
else:
header = None
return get_paged_request(url, headers=header)
element_pat = re.compile(r'<(.+?)>')
rel_pat = re.compile(r'rel=[\'"](\w+)[\'"]')
def get_paged_request(url, headers=None, **params):
"""get a full list, handling APIv3's paging"""
results = []
params.setdefault("per_page", 100)
while True:
if '?' in url:
params = None
print("fetching %s" % url, file=sys.stderr)
else:
print("fetching %s with %s" % (url, params), file=sys.stderr)
response = requests.get(url, headers=headers, params=params)
response.raise_for_status()
results.extend(response.json())
if 'next' in response.links:
url = response.links['next']['url']
else:
break
return results
def get_pulls_list(project, auth=False, **params):
    """get pull request list"""
    params.setdefault("state", "closed")
    url = "https://api.github.com/repos/{project}/pulls".format(project=project)
    headers = make_auth_header() if auth else None
    return get_paged_request(url, headers=headers, **params)
def get_issues_list(project, auth=False, **params):
    """get issues list"""
    params.setdefault("state", "closed")
    url = "https://api.github.com/repos/{project}/issues".format(project=project)
    headers = make_auth_header() if auth else None
    return get_paged_request(url, headers=headers, **params)
def get_milestones(project, auth=False, **params):
    """Fetch all milestones of `project` (``owner/repo``)."""
    url = "https://api.github.com/repos/{project}/milestones".format(project=project)
    headers = make_auth_header() if auth else None
    return get_paged_request(url, headers=headers, **params)
def get_milestone_id(project, milestone, auth=False, **params):
    """Return the number of the milestone titled `milestone`.

    Raises ValueError when no milestone with that title exists.
    """
    for candidate in get_milestones(project, auth=auth, **params):
        if candidate['title'] == milestone:
            return candidate['number']
    raise ValueError("milestone %s not found" % milestone)
def is_pull_request(issue):
    """Return True if the given issue is a pull request."""
    # Issues that are PRs carry a 'pull_request' mapping with an html_url.
    pull_info = issue.get('pull_request', {})
    return bool(pull_info.get('html_url', None))
def get_authors(pr):
    """Return 'Name <email>' for every commit author on pull request `pr`."""
    print("getting authors for #%i" % pr['number'], file=sys.stderr)
    response = requests.get(pr['commits_url'], headers=make_auth_header())
    response.raise_for_status()
    return ["%s <%s>" % (c['commit']['author']['name'],
                         c['commit']['author']['email'])
            for c in response.json()]
# encode_multipart_formdata is from urllib3.filepost
# The only change is to iter_fields, to enforce S3's required key ordering
def iter_fields(fields):
    """Yield (key, value) pairs with S3's required keys first, in order.

    S3 rejects uploads whose form fields arrive out of order, hence the
    fixed prefix; remaining fields follow in dict order.
    """
    remaining = fields.copy()
    ordered = ('key', 'acl', 'Filename', 'success_action_status',
               'AWSAccessKeyId', 'Policy', 'Signature', 'Content-Type',
               'file')
    for key in ordered:
        yield key, remaining.pop(key)
    for pair in remaining.items():
        yield pair
def post_download(project, filename, name=None, description=""):
    """Upload a file to the GitHub downloads area.

    Parameters
    ----------
    project : str
        ``owner/repo`` identifier.
    filename : str
        Path of the local file to upload.
    name : str, optional
        Download name; defaults to the file's basename.
    description : str, optional
        Description shown on the downloads page.

    Returns
    -------
    The response of the follow-up S3 upload request.
    """
    if name is None:
        name = os.path.basename(filename)
    with open(filename, 'rb') as f:
        filedata = f.read()

    # Register the download with GitHub first; the reply carries the
    # S3 upload target and credentials.
    url = "https://api.github.com/repos/{project}/downloads".format(project=project)

    payload = json.dumps(dict(name=name, size=len(filedata),
                              description=description))
    response = requests.post(url, data=payload, headers=make_auth_header())
    response.raise_for_status()
    # Decode text rather than raw bytes: json.loads(bytes) fails on
    # Python 3 < 3.6, and response.text is what the rest of this module uses.
    reply = json.loads(response.text)
    s3_url = reply['s3_url']

    # S3 requires these form fields in this exact order (enforced by
    # iter_fields inside encode_multipart_formdata).
    fields = dict(
        key=reply['path'],
        acl=reply['acl'],
        success_action_status=201,
        Filename=reply['name'],
        AWSAccessKeyId=reply['accesskeyid'],
        Policy=reply['policy'],
        Signature=reply['signature'],
        file=(reply['name'], filedata),
    )
    fields['Content-Type'] = reply['mime_type']
    data, content_type = encode_multipart_formdata(fields)
    s3r = requests.post(s3_url, data=data, headers={'Content-Type': content_type})
    return s3r
|
arokem/python-matlab-bridge | tools/gh_api.py | post_download | python | def post_download(project, filename, name=None, description=""):
if name is None:
name = os.path.basename(filename)
with open(filename, 'rb') as f:
filedata = f.read()
url = "https://api.github.com/repos/{project}/downloads".format(project=project)
payload = json.dumps(dict(name=name, size=len(filedata),
description=description))
response = requests.post(url, data=payload, headers=make_auth_header())
response.raise_for_status()
reply = json.loads(response.content)
s3_url = reply['s3_url']
fields = dict(
key=reply['path'],
acl=reply['acl'],
success_action_status=201,
Filename=reply['name'],
AWSAccessKeyId=reply['accesskeyid'],
Policy=reply['policy'],
Signature=reply['signature'],
file=(reply['name'], filedata),
)
fields['Content-Type'] = reply['mime_type']
data, content_type = encode_multipart_formdata(fields)
s3r = requests.post(s3_url, data=data, headers={'Content-Type': content_type})
return s3r | Upload a file to the GitHub downloads area | train | https://github.com/arokem/python-matlab-bridge/blob/9822c7b55435662f4f033c5479cc03fea2255755/tools/gh_api.py#L263-L292 | [
"def make_auth_header():\n return {'Authorization': 'token ' + get_auth_token()}\n",
"def encode_multipart_formdata(fields, boundary=None):\n \"\"\"\n Encode a dictionary of ``fields`` using the multipart/form-data mime format.\n\n :param fields:\n Dictionary of fields or list of (key, value) f... | """Functions for Github API requests."""
from __future__ import print_function
try:
input = raw_input
except NameError:
pass
import os
import re
import sys
import requests
import getpass
import json
try:
import requests_cache
except ImportError:
print("no cache", file=sys.stderr)
else:
requests_cache.install_cache("gh_api", expire_after=3600)
# Keyring stores passwords by a 'username', but we're not storing a username and
# password
fake_username = 'ipython_tools'
class Obj(dict):
    """Dictionary whose keys are also readable and writable as attributes."""
    def __getattr__(self, name):
        # Missing keys must surface as AttributeError so hasattr() and
        # friends behave normally.
        if name in self:
            return self[name]
        raise AttributeError(name)

    def __setattr__(self, name, val):
        self[name] = val
# Module-level cache so the keyring/prompt dance happens at most once.
token = None

def get_auth_token():
    """Return a GitHub oAuth token, caching it in memory and the keyring.

    Checks the in-memory cache, then the system keyring, and only then
    prompts for GitHub credentials to request a fresh token, which is
    stored in the keyring for later runs.
    """
    global token
    if token is not None:
        return token
    import keyring
    token = keyring.get_password('github', fake_username)
    if token is not None:
        return token
    print("Please enter your github username and password. These are not "
          "stored, only used to get an oAuth token. You can revoke this at "
          "any time on Github.")
    user = input("Username: ")
    pw = getpass.getpass("Password: ")
    auth_request = {
        "scopes": ["public_repo", "gist"],
        "note": "IPython tools",
        "note_url": "https://github.com/ipython/ipython/tree/master/tools",
    }
    response = requests.post('https://api.github.com/authorizations',
                             auth=(user, pw),
                             data=json.dumps(auth_request))
    response.raise_for_status()
    token = json.loads(response.text)['token']
    # Persist under the placeholder username so later runs skip the prompt.
    keyring.set_password('github', fake_username, token)
    return token
def make_auth_header():
    """Return an Authorization header dict carrying the oAuth token."""
    return {'Authorization': 'token ' + get_auth_token()}
def post_issue_comment(project, num, body):
url = 'https://api.github.com/repos/{project}/issues/{num}/comments'.format(project=project, num=num)
payload = json.dumps({'body': body})
requests.post(url, data=payload, headers=make_auth_header())
def post_gist(content, description='', filename='file', auth=False):
"""Post some text to a Gist, and return the URL."""
post_data = json.dumps({
"description": description,
"public": True,
"files": {
filename: {
"content": content
}
}
}).encode('utf-8')
headers = make_auth_header() if auth else {}
response = requests.post("https://api.github.com/gists", data=post_data, headers=headers)
response.raise_for_status()
response_data = json.loads(response.text)
return response_data['html_url']
def get_pull_request(project, num, auth=False):
"""get pull request info by number
"""
url = "https://api.github.com/repos/{project}/pulls/{num}".format(project=project, num=num)
if auth:
header = make_auth_header()
else:
header = None
response = requests.get(url, headers=header)
response.raise_for_status()
return json.loads(response.text, object_hook=Obj)
def get_pull_request_files(project, num, auth=False):
"""get list of files in a pull request"""
url = "https://api.github.com/repos/{project}/pulls/{num}/files".format(project=project, num=num)
if auth:
header = make_auth_header()
else:
header = None
return get_paged_request(url, headers=header)
element_pat = re.compile(r'<(.+?)>')
rel_pat = re.compile(r'rel=[\'"](\w+)[\'"]')
def get_paged_request(url, headers=None, **params):
"""get a full list, handling APIv3's paging"""
results = []
params.setdefault("per_page", 100)
while True:
if '?' in url:
params = None
print("fetching %s" % url, file=sys.stderr)
else:
print("fetching %s with %s" % (url, params), file=sys.stderr)
response = requests.get(url, headers=headers, params=params)
response.raise_for_status()
results.extend(response.json())
if 'next' in response.links:
url = response.links['next']['url']
else:
break
return results
def get_pulls_list(project, auth=False, **params):
"""get pull request list"""
params.setdefault("state", "closed")
url = "https://api.github.com/repos/{project}/pulls".format(project=project)
if auth:
headers = make_auth_header()
else:
headers = None
pages = get_paged_request(url, headers=headers, **params)
return pages
def get_issues_list(project, auth=False, **params):
"""get issues list"""
params.setdefault("state", "closed")
url = "https://api.github.com/repos/{project}/issues".format(project=project)
if auth:
headers = make_auth_header()
else:
headers = None
pages = get_paged_request(url, headers=headers, **params)
return pages
def get_milestones(project, auth=False, **params):
url = "https://api.github.com/repos/{project}/milestones".format(project=project)
if auth:
headers = make_auth_header()
else:
headers = None
milestones = get_paged_request(url, headers=headers, **params)
return milestones
def get_milestone_id(project, milestone, auth=False, **params):
milestones = get_milestones(project, auth=auth, **params)
for mstone in milestones:
if mstone['title'] == milestone:
return mstone['number']
else:
raise ValueError("milestone %s not found" % milestone)
def is_pull_request(issue):
"""Return True if the given issue is a pull request."""
return bool(issue.get('pull_request', {}).get('html_url', None))
def get_authors(pr):
print("getting authors for #%i" % pr['number'], file=sys.stderr)
h = make_auth_header()
r = requests.get(pr['commits_url'], headers=h)
r.raise_for_status()
commits = r.json()
authors = []
for commit in commits:
author = commit['commit']['author']
authors.append("%s <%s>" % (author['name'], author['email']))
return authors
# encode_multipart_formdata is from urllib3.filepost
# The only change is to iter_fields, to enforce S3's required key ordering
def iter_fields(fields):
fields = fields.copy()
for key in ('key', 'acl', 'Filename', 'success_action_status', 'AWSAccessKeyId',
'Policy', 'Signature', 'Content-Type', 'file'):
yield (key, fields.pop(key))
for (k,v) in fields.items():
yield k,v
def encode_multipart_formdata(fields, boundary=None):
    """
    Encode a dictionary of ``fields`` using the multipart/form-data mime format.

    :param fields:
        Dictionary of fields or list of (key, value) field tuples. The key is
        treated as the field name, and the value as the body of the form-data
        bytes. If the value is a tuple of two elements, then the first element
        is treated as the filename of the form-data section.

        Field names and filenames must be unicode.

    :param boundary:
        If not specified, then a random boundary will be generated using
        :func:`mimetools.choose_boundary`.
    """
    # copy requests imports in here:
    from io import BytesIO
    from requests.packages.urllib3.filepost import (
        choose_boundary, six, writer, b, get_content_type
    )
    body = BytesIO()
    if boundary is None:
        boundary = choose_boundary()
    # This module's iter_fields (not urllib3's) enforces S3's required
    # key ordering -- the only difference from the vendored original.
    for fieldname, value in iter_fields(fields):
        body.write(b('--%s\r\n' % (boundary)))
        if isinstance(value, tuple):
            # (filename, data) pair: emit a file part with a guessed MIME type.
            filename, data = value
            writer(body).write('Content-Disposition: form-data; name="%s"; '
                               'filename="%s"\r\n' % (fieldname, filename))
            body.write(b('Content-Type: %s\r\n\r\n' %
                       (get_content_type(filename))))
        else:
            # Plain field: sent as text/plain.
            data = value
            writer(body).write('Content-Disposition: form-data; name="%s"\r\n'
                               % (fieldname))
            body.write(b'Content-Type: text/plain\r\n\r\n')
        if isinstance(data, int):
            data = str(data)  # Backwards compatibility
        if isinstance(data, six.text_type):
            writer(body).write(data)
        else:
            body.write(data)
        body.write(b'\r\n')
    body.write(b('--%s--\r\n' % (boundary)))
    content_type = b('multipart/form-data; boundary=%s' % boundary)
    return body.getvalue(), content_type
|
arokem/python-matlab-bridge | pymatbridge/messenger/make.py | is_executable_file | python | def is_executable_file(path):
# follow symlinks,
fpath = os.path.realpath(path)
# return False for non-files (directories, fifo, etc.)
if not os.path.isfile(fpath):
return False
# On Solaris, etc., "If the process has appropriate privileges, an
# implementation may indicate success for X_OK even if none of the
# execute file permission bits are set."
#
# For this reason, it is necessary to explicitly check st_mode
# get file mode using os.stat, and check if `other',
# that is anybody, may read and execute.
mode = os.stat(fpath).st_mode
if mode & stat.S_IROTH and mode & stat.S_IXOTH:
return True
# get current user's group ids, and check if `group',
# when matching ours, may read and execute.
user_gids = os.getgroups() + [os.getgid()]
if (os.stat(fpath).st_gid in user_gids and
mode & stat.S_IRGRP and mode & stat.S_IXGRP):
return True
# finally, if file owner matches our effective userid,
# check if `user', may read and execute.
user_gids = os.getgroups() + [os.getgid()]
if (os.stat(fpath).st_uid == os.geteuid() and
mode & stat.S_IRUSR and mode & stat.S_IXUSR):
return True
return False | Checks that path is an executable regular file (or a symlink to a file).
This is roughly ``os.path isfile(path) and os.access(path, os.X_OK)``, but
on some platforms :func:`os.access` gives us the wrong answer, so this
checks permission bits directly.
Note
----
This function is taken from the pexpect module, see module doc-string for
license. | train | https://github.com/arokem/python-matlab-bridge/blob/9822c7b55435662f4f033c5479cc03fea2255755/pymatbridge/messenger/make.py#L40-L85 | null | #!/usr/bin/python
"""
Make : building messenger mex file.
Some functions have been taken from the pexpect module (https://pexpect.readthedocs.org/en/latest/)
The license for pexpect is below:
This license is approved by the OSI and FSF as GPL-compatible.
http://opensource.org/licenses/isc-license.txt
Copyright (c) 2012, Noah Spurrier <noah@noah.org>
PERMISSION TO USE, COPY, MODIFY, AND/OR DISTRIBUTE THIS SOFTWARE FOR ANY
PURPOSE WITH OR WITHOUT FEE IS HEREBY GRANTED, PROVIDED THAT THE ABOVE
COPYRIGHT NOTICE AND THIS PERMISSION NOTICE APPEAR IN ALL COPIES.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
from __future__ import print_function
import os
import platform
import sys
import shlex
import shutil
import subprocess
import stat
try:
import pty
except ImportError:
pty = None
def which(filename):
    """Locate `filename` on the environment PATH and check it is executable.

    Returns the full path when found and executable, otherwise None.
    (Adapted from pexpect; see the module docstring for license.)
    """
    # A filename that already carries a directory component is checked as-is.
    if os.path.dirname(filename) != '' and is_executable_file(filename):
        return filename
    search_path = os.environ.get('PATH') or os.defpath
    for directory in search_path.split(os.pathsep):
        candidate = os.path.join(directory, filename)
        if pty:
            # POSIX (pty importable): rely on permission bits.
            if is_executable_file(candidate):
                return candidate
        else:
            # Windows: try the executable extensions as well.
            pathext = os.environ.get('Pathext', '.exe;.com;.bat;.cmd')
            for ext in pathext.split(os.pathsep) + ['']:
                if os.access(candidate + ext, os.X_OK):
                    return candidate + ext
    return None
use_shell = True if sys.platform.startswith("win32") else False
def make_str(byte_or_str):
    """Decode bytes to str (UTF-8); pass str through unchanged."""
    if isinstance(byte_or_str, str):
        return byte_or_str
    return str(byte_or_str.decode("UTF-8"))
def esc(path):
    """Wrap `path` in double quotes when it contains a space."""
    return '"' + path + '"' if ' ' in path else path
def get_messenger_dir():
    """Return the mex-extension directory name for the current platform.

    Returns
    -------
    str
        One of 'mexa64', 'mexmaci64', 'mexw64', 'mexw32'.

    Raises
    ------
    ValueError
        On 32-bit Windows hardware or any unrecognized platform.  (The
        original fell off the end and died with UnboundLocalError.)
    """
    splatform = sys.platform
    if splatform.startswith('linux'):
        return 'mexa64'
    if splatform.startswith('darwin'):
        return 'mexmaci64'
    if splatform.startswith('win32'):
        # We have a win64 messenger, so we need to figure out if this is 32
        # or 64 bit Windows:
        if not platform.machine().endswith('64'):
            raise ValueError("pymatbridge does not work on win32")
        # We further need to differentiate 32 from 64 bit Python:
        maxint = sys.maxsize
        if maxint == 9223372036854775807:
            return 'mexw64'
        if maxint == 2147483647:
            return 'mexw32'
    raise ValueError("unsupported platform: %s" % splatform)
def get_config():
    """Parse <messenger_dir>/local.cfg into {lower-cased name: path}.

    Lines without '=' are ignored; an empty path value maps to '.'.
    """
    cfg = {}
    with open(os.path.join(get_messenger_dir(), 'local.cfg')) as fid:
        for line in fid:
            if '=' not in line:
                continue
            name, path = line.split('=')
            cfg[name.lower()] = path.strip() or '.'
    return cfg
def do_build(make_cmd, messenger_exe):
    """Run `make_cmd`, then move the built `messenger_exe` into the
    platform-specific messenger directory and clean up the object file."""
    print('Building %s...' % messenger_exe)
    print(make_cmd)
    subprocess.check_output(shlex.split(make_cmd), shell=use_shell)

    destination = os.path.join(get_messenger_dir(), messenger_exe)
    shutil.move(messenger_exe, destination)

    if os.path.exists('messenger.o'):
        os.remove('messenger.o')
def build_octave():
    """Build the Octave messenger mex via mkoctfile, using paths from local.cfg."""
    flags = "-L%(octave_lib)s -I%(octave_inc)s -L%(zmq_lib)s -I%(zmq_inc)s" % get_config()
    do_build("mkoctfile --mex %s -lzmq ./src/messenger.c" % flags, 'messenger.mex')
def which_matlab():
    """Best-effort search for the ``bin`` directory of a MATLAB install.

    Tries the ``matlab`` executable on the PATH first; failing that, scans
    the conventional per-platform install locations and returns the newest
    release found, or None when nothing is found.
    """
    try:
        matlab_path = which('matlab')
        # BUG fix: which() returns None when matlab is not on the PATH;
        # the original called .strip() on it and died with AttributeError
        # instead of falling through to the guessing code below.
        if matlab_path is not None:
            matlab_path = make_str(matlab_path).strip()
            return os.path.dirname(os.path.realpath(matlab_path))
    except (OSError, subprocess.CalledProcessError):
        pass

    def ensure_path(path, extension=''):
        # True when `path` is a directory holding a matlab executable.
        return os.path.isdir(path) and \
            os.path.isfile(os.path.join(path, "matlab" + extension))

    # Not on the PATH -- guess the location of MATLAB per platform.
    if sys.platform.startswith("darwin"):
        MATLABs = [os.path.join("/Applications", i, "bin")
                   for i in os.listdir("/Applications")
                   if i.startswith("MATLAB_R")]
        # only want ones with MATLAB executables;
        # sort so we can get the latest
        MATLABs = list(sorted(filter(ensure_path, MATLABs)))
        return MATLABs[-1] if len(MATLABs) > 0 else None
    elif sys.platform.startswith("win32"):
        MATLAB_loc = "C:\\Program Files\\MATLAB"
        if not os.path.isdir(MATLAB_loc):
            return None
        MATLABs = [os.path.join(MATLAB_loc, i, "bin")
                   for i in os.listdir(MATLAB_loc)]
        # BUG fix: dropped the original debug prints here -- one of them
        # (`print(i)`) referenced the comprehension variable after the
        # comprehension, which is a NameError on Python 3.
        MATLABs = list(sorted(filter(lambda x: ensure_path(x, ".exe"),
                                     MATLABs)))
        return MATLABs[-1] if len(MATLABs) > 0 else None
    elif sys.platform.startswith("linux"):
        MATLAB_loc = "/usr/local/MATLAB/"
        if not os.path.isdir(MATLAB_loc):
            return None
        MATLABs = [os.path.join(MATLAB_loc, i, "bin")
                   for i in os.listdir(MATLAB_loc)
                   if i.startswith("R")]
        # only want ones with MATLAB executables;
        # sort so we can get the latest
        MATLABs = list(sorted(filter(ensure_path, MATLABs)))
        return MATLABs[-1] if len(MATLABs) > 0 else None
def build_matlab(static=False):
    """build the messenger mex for MATLAB

    static : bool
        Determines if the zmq library has been statically linked.
        If so, it will append the command line option -DZMQ_STATIC
        when compiling the mex so it matches libzmq.
    """
    cfg = get_config()
    # Quotes are stripped here; esc() re-adds them around the full
    # commands so paths containing spaces survive.
    if 'matlab_bin' in cfg and cfg['matlab_bin'] != '.':
        matlab_bin = cfg['matlab_bin'].strip('"')
    else:
        # No configured path: try to autodetect the MATLAB install.
        matlab_bin = which_matlab()
        if matlab_bin is None:
            raise ValueError("specify 'matlab_bin' in cfg file")

    # Ask MATLAB's mexext utility for the platform mex extension.
    ext_out = subprocess.check_output(esc(os.path.join(matlab_bin, "mexext")),
                                      shell=use_shell)
    extension = ext_out.decode('utf-8').rstrip('\r\n')

    # Compile the messenger mex against zmq.
    mex = esc(os.path.join(matlab_bin, "mex"))
    zmq_flags = "-L%(zmq_lib)s -I%(zmq_inc)s" % cfg
    make_cmd = '%s -O %s -lzmq ./src/messenger.c' % (mex, zmq_flags)
    if static:
        make_cmd += ' -DZMQ_STATIC'
    do_build(make_cmd, 'messenger.%s' % extension)
if __name__ == '__main__':
    # Command-line entry point: ``python make.py {matlab,octave} [--static]``.
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "target",
        choices=["matlab", "octave"],
        type=str.lower,
        help="target to be built")
    parser.add_argument("--static", action="store_true",
                        help="staticly link libzmq")
    args = parser.parse_args()
    if args.target == "matlab":
        build_matlab(static=args.static)
    elif args.target == "octave":
        # No static-link path is wired up for mkoctfile here.
        if args.static:
            raise ValueError("static building not yet supported for octave")
        build_octave()
    else:
        # Unreachable in practice: argparse restricts `target` choices.
        raise ValueError()
|
arokem/python-matlab-bridge | pymatbridge/messenger/make.py | which | python | def which(filename):
'''This takes a given filename; tries to find it in the environment path;
then checks if it is executable. This returns the full path to the filename
if found and executable. Otherwise this returns None.
Note
----
This function is taken from the pexpect module, see module doc-string for
license.
'''
# Special case where filename contains an explicit path.
if os.path.dirname(filename) != '' and is_executable_file(filename):
return filename
if 'PATH' not in os.environ or os.environ['PATH'] == '':
p = os.defpath
else:
p = os.environ['PATH']
pathlist = p.split(os.pathsep)
for path in pathlist:
ff = os.path.join(path, filename)
if pty:
if is_executable_file(ff):
return ff
else:
pathext = os.environ.get('Pathext', '.exe;.com;.bat;.cmd')
pathext = pathext.split(os.pathsep) + ['']
for ext in pathext:
if os.access(ff + ext, os.X_OK):
return ff + ext
return None | This takes a given filename; tries to find it in the environment path;
then checks if it is executable. This returns the full path to the filename
if found and executable. Otherwise this returns None.
Note
----
This function is taken from the pexpect module, see module doc-string for
license. | train | https://github.com/arokem/python-matlab-bridge/blob/9822c7b55435662f4f033c5479cc03fea2255755/pymatbridge/messenger/make.py#L88-L118 | [
"def is_executable_file(path):\n \"\"\"Checks that path is an executable regular file (or a symlink to a file).\n\n This is roughly ``os.path isfile(path) and os.access(path, os.X_OK)``, but\n on some platforms :func:`os.access` gives us the wrong answer, so this\n checks permission bits directly.\n\n ... | #!/usr/bin/python
"""
Make : building messenger mex file.
Some functions have been taken from the pexpect module (https://pexpect.readthedocs.org/en/latest/)
The license for pexpect is below:
This license is approved by the OSI and FSF as GPL-compatible.
http://opensource.org/licenses/isc-license.txt
Copyright (c) 2012, Noah Spurrier <noah@noah.org>
PERMISSION TO USE, COPY, MODIFY, AND/OR DISTRIBUTE THIS SOFTWARE FOR ANY
PURPOSE WITH OR WITHOUT FEE IS HEREBY GRANTED, PROVIDED THAT THE ABOVE
COPYRIGHT NOTICE AND THIS PERMISSION NOTICE APPEAR IN ALL COPIES.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
from __future__ import print_function
import os
import platform
import sys
import shlex
import shutil
import subprocess
import stat
try:
import pty
except ImportError:
pty = None
def is_executable_file(path):
"""Checks that path is an executable regular file (or a symlink to a file).
This is roughly ``os.path isfile(path) and os.access(path, os.X_OK)``, but
on some platforms :func:`os.access` gives us the wrong answer, so this
checks permission bits directly.
Note
----
This function is taken from the pexpect module, see module doc-string for
license.
"""
# follow symlinks,
fpath = os.path.realpath(path)
# return False for non-files (directories, fifo, etc.)
if not os.path.isfile(fpath):
return False
# On Solaris, etc., "If the process has appropriate privileges, an
# implementation may indicate success for X_OK even if none of the
# execute file permission bits are set."
#
# For this reason, it is necessary to explicitly check st_mode
# get file mode using os.stat, and check if `other',
# that is anybody, may read and execute.
mode = os.stat(fpath).st_mode
if mode & stat.S_IROTH and mode & stat.S_IXOTH:
return True
# get current user's group ids, and check if `group',
# when matching ours, may read and execute.
user_gids = os.getgroups() + [os.getgid()]
if (os.stat(fpath).st_gid in user_gids and
mode & stat.S_IRGRP and mode & stat.S_IXGRP):
return True
# finally, if file owner matches our effective userid,
# check if `user', may read and execute.
user_gids = os.getgroups() + [os.getgid()]
if (os.stat(fpath).st_uid == os.geteuid() and
mode & stat.S_IRUSR and mode & stat.S_IXUSR):
return True
return False
use_shell = True if sys.platform.startswith("win32") else False
def make_str(byte_or_str):
return byte_or_str if isinstance(byte_or_str, str) \
else str(byte_or_str.decode("UTF-8"))
def esc(path):
if ' ' in path:
return '"' + path + '"'
else:
return path
def get_messenger_dir():
# Check the system platform first
splatform = sys.platform
if splatform.startswith('linux'):
messenger_dir = 'mexa64'
elif splatform.startswith('darwin'):
messenger_dir = 'mexmaci64'
elif splatform.startswith('win32'):
if splatform == "win32":
# We have a win64 messenger, so we need to figure out if this is 32
# or 64 bit Windows:
if not platform.machine().endswith('64'):
raise ValueError("pymatbridge does not work on win32")
# We further need to differentiate 32 from 64 bit:
maxint = sys.maxsize
if maxint == 9223372036854775807:
messenger_dir = 'mexw64'
elif maxint == 2147483647:
messenger_dir = 'mexw32'
return messenger_dir
def get_config():
messenger_dir = get_messenger_dir()
with open(os.path.join(messenger_dir, 'local.cfg')) as fid:
lines = fid.readlines()
cfg = {}
for line in lines:
if '=' not in line:
continue
name, path = line.split('=')
cfg[name.lower()] = path.strip() or '.'
return cfg
def do_build(make_cmd, messenger_exe):
print('Building %s...' % messenger_exe)
print(make_cmd)
messenger_dir = get_messenger_dir()
subprocess.check_output(shlex.split(make_cmd), shell=use_shell)
messenger_loc = os.path.join(messenger_dir, messenger_exe)
shutil.move(messenger_exe, messenger_loc)
if os.path.exists('messenger.o'):
os.remove('messenger.o')
def build_octave():
paths = "-L%(octave_lib)s -I%(octave_inc)s -L%(zmq_lib)s -I%(zmq_inc)s"
paths = paths % get_config()
make_cmd = "mkoctfile --mex %s -lzmq ./src/messenger.c" % paths
do_build(make_cmd, 'messenger.mex')
def which_matlab():
try:
matlab_path = which('matlab').strip()
matlab_path = make_str(matlab_path)
return os.path.dirname(os.path.realpath(matlab_path))
except (OSError, subprocess.CalledProcessError):
def ensure_path(path, extension=''):
return os.path.isdir(path) and \
os.path.isfile(os.path.join(path, "matlab" + extension))
# need to guess the location of MATLAB
if sys.platform.startswith("darwin"):
MATLABs = [os.path.join("/Applications", i, "bin")
for i in os.listdir("/Applications")
if i.startswith("MATLAB_R")]
# only want ones with MATLAB executables
# sort so we can get the latest
MATLABs = list(sorted(filter(ensure_path, MATLABs)))
return MATLABs[-1] if len(MATLABs) > 0 else None
elif sys.platform.startswith("win32"):
MATLAB_loc = "C:\\Program Files\\MATLAB"
print(MATLAB_loc)
if not os.path.isdir(MATLAB_loc):
return None
MATLABs = [os.path.join(MATLAB_loc, i, "bin")
for i in os.listdir(MATLAB_loc)]
print(MATLABs)
print(i)
# only want ones with MATLAB executables
# sort so we can get the latest
MATLABs = list(sorted(filter(lambda x: ensure_path(x, ".exe"),
MATLABs)))
print(MATLABs)
return MATLABs[-1] if len(MATLABs) > 0 else None
elif sys.platform.startswith("linux"):
MATLAB_loc = "/usr/local/MATLAB/"
if not os.path.isdir(MATLAB_loc):
return None
MATLABs = [os.path.join(MATLAB_loc, i, "bin")
for i in os.listdir(MATLAB_loc)
if i.startswith("R")]
# only want ones with MATLAB executables
# sort so we can get the latest
MATLABs = list(sorted(filter(ensure_path, MATLABs)))
return MATLABs[-1] if len(MATLABs) > 0 else None
def build_matlab(static=False):
"""build the messenger mex for MATLAB
static : bool
Determines if the zmq library has been statically linked.
If so, it will append the command line option -DZMQ_STATIC
when compiling the mex so it matches libzmq.
"""
cfg = get_config()
# To deal with spaces, remove quotes now, and add
# to the full commands themselves.
if 'matlab_bin' in cfg and cfg['matlab_bin'] != '.':
matlab_bin = cfg['matlab_bin'].strip('"')
else: # attempt to autodetect MATLAB filepath
matlab_bin = which_matlab()
if matlab_bin is None:
raise ValueError("specify 'matlab_bin' in cfg file")
# Get the extension
extcmd = esc(os.path.join(matlab_bin, "mexext"))
extension = subprocess.check_output(extcmd, shell=use_shell)
extension = extension.decode('utf-8').rstrip('\r\n')
# Build the mex file
mex = esc(os.path.join(matlab_bin, "mex"))
paths = "-L%(zmq_lib)s -I%(zmq_inc)s" % cfg
make_cmd = '%s -O %s -lzmq ./src/messenger.c' % (mex, paths)
if static:
make_cmd += ' -DZMQ_STATIC'
do_build(make_cmd, 'messenger.%s' % extension)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
"target",
choices=["matlab", "octave"],
type=str.lower,
help="target to be built")
parser.add_argument("--static", action="store_true",
help="staticly link libzmq")
args = parser.parse_args()
if args.target == "matlab":
build_matlab(static=args.static)
elif args.target == "octave":
if args.static:
raise ValueError("static building not yet supported for octave")
build_octave()
else:
raise ValueError()
|
arokem/python-matlab-bridge | pymatbridge/messenger/make.py | build_matlab | python | def build_matlab(static=False):
cfg = get_config()
# To deal with spaces, remove quotes now, and add
# to the full commands themselves.
if 'matlab_bin' in cfg and cfg['matlab_bin'] != '.':
matlab_bin = cfg['matlab_bin'].strip('"')
else: # attempt to autodetect MATLAB filepath
matlab_bin = which_matlab()
if matlab_bin is None:
raise ValueError("specify 'matlab_bin' in cfg file")
# Get the extension
extcmd = esc(os.path.join(matlab_bin, "mexext"))
extension = subprocess.check_output(extcmd, shell=use_shell)
extension = extension.decode('utf-8').rstrip('\r\n')
# Build the mex file
mex = esc(os.path.join(matlab_bin, "mex"))
paths = "-L%(zmq_lib)s -I%(zmq_inc)s" % cfg
make_cmd = '%s -O %s -lzmq ./src/messenger.c' % (mex, paths)
if static:
make_cmd += ' -DZMQ_STATIC'
do_build(make_cmd, 'messenger.%s' % extension) | build the messenger mex for MATLAB
static : bool
Determines if the zmq library has been statically linked.
If so, it will append the command line option -DZMQ_STATIC
when compiling the mex so it matches libzmq. | train | https://github.com/arokem/python-matlab-bridge/blob/9822c7b55435662f4f033c5479cc03fea2255755/pymatbridge/messenger/make.py#L242-L270 | [
"def esc(path):\n if ' ' in path:\n return '\"' + path + '\"'\n else:\n return path\n",
"def get_config():\n messenger_dir = get_messenger_dir()\n with open(os.path.join(messenger_dir, 'local.cfg')) as fid:\n lines = fid.readlines()\n\n cfg = {}\n for line in lines:\n ... | #!/usr/bin/python
"""
Make : building messenger mex file.
Some functions have been taken from the pexpect module (https://pexpect.readthedocs.org/en/latest/)
The license for pexpect is below:
This license is approved by the OSI and FSF as GPL-compatible.
http://opensource.org/licenses/isc-license.txt
Copyright (c) 2012, Noah Spurrier <noah@noah.org>
PERMISSION TO USE, COPY, MODIFY, AND/OR DISTRIBUTE THIS SOFTWARE FOR ANY
PURPOSE WITH OR WITHOUT FEE IS HEREBY GRANTED, PROVIDED THAT THE ABOVE
COPYRIGHT NOTICE AND THIS PERMISSION NOTICE APPEAR IN ALL COPIES.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
from __future__ import print_function
import os
import platform
import sys
import shlex
import shutil
import subprocess
import stat
try:
import pty
except ImportError:
pty = None
def is_executable_file(path):
    """Return ``True`` if *path* resolves to a regular file that the
    current user may read and execute.

    This is roughly ``os.path.isfile(path) and os.access(path, os.X_OK)``,
    but it inspects the permission bits directly because on some
    platforms (e.g. Solaris with elevated privileges) :func:`os.access`
    can report success even when no execute bit is set.

    Note
    ----
    This function is taken from the pexpect module, see module doc-string
    for license.
    """
    # Follow symlinks to the real target.
    real = os.path.realpath(path)

    # Only regular files (or symlinks to them) count as executables;
    # reject directories, fifos, sockets, etc.
    if not os.path.isfile(real):
        return False

    st = os.stat(real)
    mode = st.st_mode

    # World permissions: anybody may read and execute.
    if mode & stat.S_IROTH and mode & stat.S_IXOTH:
        return True

    # Group permissions: the file's group must be one of ours.
    our_groups = os.getgroups() + [os.getgid()]
    if st.st_gid in our_groups and mode & stat.S_IRGRP and mode & stat.S_IXGRP:
        return True

    # Owner permissions: the file must belong to our effective user.
    if st.st_uid == os.geteuid() and mode & stat.S_IRUSR and mode & stat.S_IXUSR:
        return True

    return False
def which(filename):
    """Search the environment ``PATH`` for *filename* and return the full
    path of the first executable match, or ``None`` if nothing is found.

    A filename that already contains a directory component is checked
    directly instead of being searched for.

    Note
    ----
    This function is taken from the pexpect module, see module doc-string
    for license.
    """
    # An explicit path short-circuits the PATH search.
    if os.path.dirname(filename) != '' and is_executable_file(filename):
        return filename

    # An unset or empty PATH falls back to the OS default search path.
    search_path = os.environ.get('PATH') or os.defpath

    for directory in search_path.split(os.pathsep):
        candidate = os.path.join(directory, filename)
        if pty:
            # POSIX: rely on the permission-bit check.
            if is_executable_file(candidate):
                return candidate
        else:
            # Windows (no pty module): also try the executable extensions.
            pathext = os.environ.get('Pathext', '.exe;.com;.bat;.cmd')
            for ext in pathext.split(os.pathsep) + ['']:
                if os.access(candidate + ext, os.X_OK):
                    return candidate + ext
    return None
use_shell = True if sys.platform.startswith("win32") else False
def make_str(byte_or_str):
    """Return *byte_or_str* as ``str``, decoding UTF-8 bytes if needed."""
    if isinstance(byte_or_str, str):
        return byte_or_str
    return str(byte_or_str.decode("UTF-8"))
def esc(path):
    """Wrap *path* in double quotes when it contains a space, so it
    survives being embedded in a shell command line."""
    return '"' + path + '"' if ' ' in path else path
def get_messenger_dir():
    """Return the name of the platform-specific messenger directory.

    The directory name matches the MATLAB mex extension for the current
    platform, e.g. ``'mexa64'`` on 64-bit Linux or ``'mexmaci64'`` on
    macOS.

    Raises
    ------
    ValueError
        On 32-bit Windows, or on any platform for which no messenger
        binary exists.  (Previously an unsupported platform fell through
        and crashed with an ``UnboundLocalError`` on the return.)
    """
    # Check the system platform first
    splatform = sys.platform
    if splatform.startswith('linux'):
        return 'mexa64'
    elif splatform.startswith('darwin'):
        return 'mexmaci64'
    elif splatform.startswith('win32'):
        # We have a win64 messenger, so we need to figure out if this is
        # 32 or 64 bit Windows:
        if not platform.machine().endswith('64'):
            raise ValueError("pymatbridge does not work on win32")
        # We further need to differentiate a 32-bit from a 64-bit Python
        # interpreter via sys.maxsize:
        if sys.maxsize == 9223372036854775807:
            return 'mexw64'
        elif sys.maxsize == 2147483647:
            return 'mexw32'
    # Explicit failure instead of the old UnboundLocalError fall-through.
    raise ValueError("Unsupported platform: %s" % splatform)
def get_config():
    """Parse ``<messenger_dir>/local.cfg`` into a dict.

    Keys are lower-cased option names; an empty value defaults to
    ``'.'``.  Lines without an ``=`` are ignored.
    """
    cfg_path = os.path.join(get_messenger_dir(), 'local.cfg')
    cfg = {}
    with open(cfg_path) as fid:
        for line in fid:
            if '=' not in line:
                continue
            name, path = line.split('=')
            cfg[name.lower()] = path.strip() or '.'
    return cfg
def do_build(make_cmd, messenger_exe):
    """Run *make_cmd*, then move the freshly built *messenger_exe* into
    the platform-specific messenger directory and remove any leftover
    ``messenger.o`` object file."""
    print('Building %s...' % messenger_exe)
    print(make_cmd)
    subprocess.check_output(shlex.split(make_cmd), shell=use_shell)
    destination = os.path.join(get_messenger_dir(), messenger_exe)
    shutil.move(messenger_exe, destination)
    # Clean up the intermediate object file, if the compiler left one.
    if os.path.exists('messenger.o'):
        os.remove('messenger.o')
def build_octave():
    """Build the Octave messenger mex via ``mkoctfile``.

    Paths to the Octave and ZeroMQ headers/libraries are read from the
    platform ``local.cfg`` file via :func:`get_config`, and the actual
    compile/move is delegated to :func:`do_build`.
    """
    paths = "-L%(octave_lib)s -I%(octave_inc)s -L%(zmq_lib)s -I%(zmq_inc)s"
    paths = paths % get_config()
    make_cmd = "mkoctfile --mex %s -lzmq ./src/messenger.c" % paths
    do_build(make_cmd, 'messenger.mex')
def which_matlab():
    """Return the directory containing the MATLAB executable, or ``None``.

    First tries ``which('matlab')``; if MATLAB is not on the PATH, falls
    back to scanning the conventional install locations for the current
    platform and returns the ``bin`` directory of the newest release
    found (``None`` if nothing is found).
    """
    try:
        matlab_path = which('matlab').strip()
        matlab_path = make_str(matlab_path)
        return os.path.dirname(os.path.realpath(matlab_path))
    except (OSError, subprocess.CalledProcessError, AttributeError):
        # AttributeError covers which() returning None (MATLAB not on
        # PATH); previously that crashed instead of reaching the
        # fallback search below.
        def ensure_path(path, extension=''):
            # A candidate is valid if it is a directory holding a
            # ``matlab`` executable (with the given extension).
            return os.path.isdir(path) and \
                os.path.isfile(os.path.join(path, "matlab" + extension))

        # Need to guess the location of MATLAB.
        if sys.platform.startswith("darwin"):
            MATLABs = [os.path.join("/Applications", i, "bin")
                       for i in os.listdir("/Applications")
                       if i.startswith("MATLAB_R")]
            # Only want ones with MATLAB executables; sort so we can get
            # the latest release.
            MATLABs = list(sorted(filter(ensure_path, MATLABs)))
            return MATLABs[-1] if len(MATLABs) > 0 else None
        elif sys.platform.startswith("win32"):
            MATLAB_loc = "C:\\Program Files\\MATLAB"
            if not os.path.isdir(MATLAB_loc):
                return None
            MATLABs = [os.path.join(MATLAB_loc, i, "bin")
                       for i in os.listdir(MATLAB_loc)]
            # NOTE: removed the stray debug prints here; the old
            # ``print(i)`` raised a NameError under Python 3 because
            # comprehension variables do not leak out of scope.
            MATLABs = list(sorted(filter(lambda x: ensure_path(x, ".exe"),
                                         MATLABs)))
            return MATLABs[-1] if len(MATLABs) > 0 else None
        elif sys.platform.startswith("linux"):
            MATLAB_loc = "/usr/local/MATLAB/"
            if not os.path.isdir(MATLAB_loc):
                return None
            MATLABs = [os.path.join(MATLAB_loc, i, "bin")
                       for i in os.listdir(MATLAB_loc)
                       if i.startswith("R")]
            # Only want ones with MATLAB executables; sort so we can get
            # the latest release.
            MATLABs = list(sorted(filter(ensure_path, MATLABs)))
            return MATLABs[-1] if len(MATLABs) > 0 else None
if __name__ == '__main__':
    # Command-line entry point: build the messenger mex for MATLAB or
    # the mex file for Octave, e.g. ``python make.py matlab --static``.
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "target",
        choices=["matlab", "octave"],
        type=str.lower,
        help="target to be built")
    parser.add_argument("--static", action="store_true",
                        help="staticly link libzmq")
    args = parser.parse_args()

    if args.target == "matlab":
        build_matlab(static=args.static)
    elif args.target == "octave":
        # Static linking is only wired up for the MATLAB build.
        if args.static:
            raise ValueError("static building not yet supported for octave")
        build_octave()
    else:
        # Unreachable in practice: argparse restricts the choices above.
        raise ValueError()
|
mrcagney/make_gtfs | make_gtfs/cli.py | make_gtfs | python | def make_gtfs(source_path, target_path, buffer, ndigits):
pfeed = pf.read_protofeed(source_path)
feed = m.build_feed(pfeed, buffer=buffer)
gt.write_gtfs(feed, target_path, ndigits=ndigits) | Create a GTFS feed from the files in the directory SOURCE_PATH.
See the project README for a description of the required source
files.
Save the feed to the file or directory TARGET_PATH.
If the target path ends in '.zip', then write the feed as a zip
archive.
Otherwise assume the path is a directory, and write the feed as a
collection of CSV files to that directory, creating the directory
if it does not exist.
If a stops file is present, then search within ``buffer`` meters
on the traffic side of trip paths for stops.
Round all decimals to ndigits decimal places.
All distances in the resulting GTFS feed will be in kilometers. | train | https://github.com/mrcagney/make_gtfs/blob/37b6f88e03bac708c2e85d6f4b6d48a0c92e4a59/make_gtfs/cli.py#L18-L37 | [
"def read_protofeed(path):\n \"\"\"\n Read the data files at the given directory path\n (string or Path object) and build a ProtoFeed from them.\n Validate the resulting ProtoFeed.\n If invalid, raise a ``ValueError`` specifying the errors.\n Otherwise, return the resulting ProtoFeed.\n\n The d... | import click
import gtfstk as gt
from . import protofeed as pf
from . import constants as cs
from . import main as m
@click.command(short_help="Create a GTFS feed from simpler files")
@click.argument('source_path', type=click.Path())
@click.argument('target_path', type=click.Path())
@click.option('-b', '--buffer', default=cs.BUFFER, type=float,
show_default=True,
help="Meters to buffer trip paths to find stops")
@click.option('-n', '--ndigits', default=6, type=int,
show_default=True,
help="Number of decimal places to round float values in the output "
"GTFS feed")
|
mrcagney/make_gtfs | make_gtfs/protofeed.py | read_protofeed | python | def read_protofeed(path):
path = Path(path)
service_windows = pd.read_csv(
path/'service_windows.csv')
meta = pd.read_csv(path/'meta.csv',
dtype={'start_date': str, 'end_date': str})
shapes = gpd.read_file(str(path/'shapes.geojson'), driver='GeoJSON')
if (path/'stops.csv').exists():
stops = (
pd.read_csv(path/'stops.csv', dtype={
'stop_id': str,
'stop_code': str,
'zone_id': str,
'location_type': int,
'parent_station': str,
'stop_timezone': str,
'wheelchair_boarding': int,
})
.drop_duplicates(subset=['stop_lon', 'stop_lat'])
.dropna(subset=['stop_lon', 'stop_lat'], how='any')
)
else:
stops = None
frequencies = pd.read_csv(path/'frequencies.csv', dtype={
'route_short_name': str,
'service_window_id': str,
'shape_id': str,
'direction': int,
'frequency': int,
})
pfeed = ProtoFeed(frequencies, meta, service_windows, shapes, stops)
# Validate
v = vd.validate(pfeed)
if 'error' in v.type.values:
raise ValueError(
"Invalid ProtoFeed files:\n\n" + v.to_string(justify='left'))
return pfeed | Read the data files at the given directory path
(string or Path object) and build a ProtoFeed from them.
Validate the resulting ProtoFeed.
If invalid, raise a ``ValueError`` specifying the errors.
Otherwise, return the resulting ProtoFeed.
The data files needed to build a ProtoFeed are
- ``frequencies.csv``: (required) A CSV file containing route frequency
information. The CSV file contains the columns
- ``route_short_name``: (required) String. A unique short name
for the route, e.g. '51X'
- ``route_long_name``: (required) String. Full name of the route
that is more descriptive than ``route_short_name``
- ``route_type``: (required) Integer. The
`GTFS type of the route <https://developers.google.com/transit/gtfs/reference/#routestxt>`_
- ``service_window_id`` (required): String. A service window ID
for the route taken from the file ``service_windows.csv``
- ``direction``: (required) Integer 0, 1, or 2. Indicates
whether the route travels in GTFS direction 0, GTFS direction
1, or in both directions.
In the latter case, trips will be created that travel in both
directions along the route's path, each direction operating at
the given frequency. Otherwise, trips will be created that
travel in only the given direction.
- ``frequency`` (required): Integer. The frequency of the route
during the service window in vehicles per hour.
- ``speed``: (optional) Float. The speed of the route in
kilometers per hour
- ``shape_id``: (required) String. A shape ID that is listed in
``shapes.geojson`` and corresponds to the linestring of the
(route, direction, service window) tuple.
- ``meta.csv``: (required) A CSV file containing network metadata.
The CSV file contains the columns
- ``agency_name``: (required) String. The name of the transport
agency
- ``agency_url``: (required) String. A fully qualified URL for
the transport agency
- ``agency_timezone``: (required) String. Timezone where the
transit agency is located. Timezone names never contain the
space character but may contain an underscore. Refer to
`http://en.wikipedia.org/wiki/List_of_tz_zones <http://en.wikipedia.org/wiki/List_of_tz_zones>`_ for a list of valid values
- ``start_date``, ``end_date`` (required): Strings. The start
and end dates for which all this network information is valid
formated as YYYYMMDD strings
- ``default_route_speed``: (required) Float. Default speed in
kilometers per hour to assign to routes with no ``speed``
entry in the file ``routes.csv``
- ``service_windows.csv``: (required) A CSV file containing service window
information.
A *service window* is a time interval and a set of days of the
week during which all routes have constant service frequency,
e.g. Saturday and Sunday 07:00 to 09:00.
The CSV file contains the columns
- ``service_window_id``: (required) String. A unique identifier
for a service window
- ``start_time``, ``end_time``: (required) Strings. The start
and end times of the service window in HH:MM:SS format where
the hour is less than 24
- ``monday``, ``tuesday``, ``wednesday``, ``thursday``,
``friday``, ``saturday``, ``sunday`` (required): Integer 0
or 1. Indicates whether the service is active on the given day
(1) or not (0)
- ``shapes.geojson``: (required) A GeoJSON file containing route shapes.
The file consists of one feature collection of LineString
features, where each feature's properties contains at least the
attribute ``shape_id``, which links the route's shape to the
route's information in ``routes.csv``.
- ``stops.csv``: (optional) A CSV file containing all the required
and optional fields of ``stops.txt`` in
`the GTFS <https://developers.google.com/transit/gtfs/reference/#stopstxt>`_ | train | https://github.com/mrcagney/make_gtfs/blob/37b6f88e03bac708c2e85d6f4b6d48a0c92e4a59/make_gtfs/protofeed.py#L88-L213 | [
"def validate(pfeed, *, as_df=True, include_warnings=True):\n \"\"\"\n Check whether the given pfeed satisfies the ProtoFeed spec.\n\n Parameters\n ----------\n pfeed : ProtoFeed\n as_df : boolean\n If ``True``, then return the resulting report as a DataFrame;\n otherwise return the ... | from pathlib import Path
import geopandas as gpd
import pandas as pd
import numpy as np
from . import constants as cs
from . import validators as vd
class ProtoFeed(object):
    """
    A ProtoFeed instance holds the source data
    from which to build a GTFS feed, plus a little metadata.

    Attributes are

    - ``service_windows``: DataFrame
    - ``frequencies``: DataFrame; has speeds filled in
    - ``meta``: DataFrame
    - ``shapes``: GeoDataFrame
    - ``shapes_extra``: dictionary of the form <shape ID> ->
      <trip directions using the shape (0, 1, or 2)>
    """
    def __init__(self, frequencies=None, meta=None, service_windows=None,
                 shapes=None, stops=None):
        # Store the raw tables; all are optional and default to None.
        self.frequencies = frequencies
        self.meta = meta
        self.service_windows = service_windows
        self.shapes = shapes
        self.stops = stops

        # Clean frequencies
        freq = self.frequencies
        if freq is not None:
            cols = freq.columns

            # Fill missing route types with 3 (bus)
            freq['route_type'].fillna(3, inplace=True)
            freq['route_type'] = freq['route_type'].astype(int)

            # Create route speeds and fill in missing values with default
            # speeds taken from the (single-row) meta table.
            if 'speed' not in cols:
                freq['speed'] = np.nan
            freq['speed'].fillna(self.meta['default_route_speed'].iat[0],
                                 inplace=True)

        self.frequencies = freq

        # Build shapes extra from shape IDs in frequencies
        if self.frequencies is not None:
            def my_agg(group):
                # Collapse the directions used by one shape: any shape
                # used by more than one direction (or explicitly by
                # direction 2) is marked as bidirectional (2).
                d = {}
                dirs = group.direction.unique()
                if len(dirs) > 1 or 2 in dirs:
                    d['direction'] = 2
                else:
                    d['direction'] = dirs[0]
                return pd.Series(d)

            self.shapes_extra = dict(
                self.frequencies
                .groupby('shape_id')
                .apply(my_agg)
                .reset_index()
                .values
            )
        else:
            self.shapes_extra = None

    def copy(self):
        """
        Return a copy of this ProtoFeed, that is, a feed with all the
        same attributes.
        """
        other = ProtoFeed()
        # cs.PROTOFEED_ATTRS lists every attribute a ProtoFeed carries.
        for key in cs.PROTOFEED_ATTRS:
            value = getattr(self, key)
            if isinstance(value, pd.DataFrame):
                # Pandas copy DataFrame, so the copy is independent.
                value = value.copy()
            setattr(other, key, value)
        return other
|
mrcagney/make_gtfs | make_gtfs/protofeed.py | ProtoFeed.copy | python | def copy(self):
other = ProtoFeed()
for key in cs.PROTOFEED_ATTRS:
value = getattr(self, key)
if isinstance(value, pd.DataFrame):
# Pandas copy DataFrame
value = value.copy()
setattr(other, key, value)
return other | Return a copy of this ProtoFeed, that is, a feed with all the
same attributes. | train | https://github.com/mrcagney/make_gtfs/blob/37b6f88e03bac708c2e85d6f4b6d48a0c92e4a59/make_gtfs/protofeed.py#L73-L86 | null | class ProtoFeed(object):
"""
A ProtoFeed instance holds the source data
from which to build a GTFS feed, plus a little metadata.
Attributes are
- ``service_windows``: DataFrame
- ``frequencies``: DataFrame; has speeds filled in
- ``meta``: DataFrame
- ``shapes``: GeoDataFrame
- ``shapes_extra``: dictionary of the form <shape ID> ->
<trip directions using the shape (0, 1, or 2)>
"""
def __init__(self, frequencies=None, meta=None, service_windows=None,
shapes=None, stops=None):
self.frequencies = frequencies
self.meta = meta
self.service_windows = service_windows
self.shapes = shapes
self.stops = stops
# Clean frequencies
freq = self.frequencies
if freq is not None:
cols = freq.columns
# Fill missing route types with 3 (bus)
freq['route_type'].fillna(3, inplace=True)
freq['route_type'] = freq['route_type'].astype(int)
# Create route speeds and fill in missing values with default speeds
if 'speed' not in cols:
freq['speed'] = np.nan
freq['speed'].fillna(self.meta['default_route_speed'].iat[0],
inplace=True)
self.frequencies = freq
# Build shapes extra from shape IDs in frequencies
if self.frequencies is not None:
def my_agg(group):
d = {}
dirs = group.direction.unique()
if len(dirs) > 1 or 2 in dirs:
d['direction'] = 2
else:
d['direction'] = dirs[0]
return pd.Series(d)
self.shapes_extra = dict(
self.frequencies
.groupby('shape_id')
.apply(my_agg)
.reset_index()
.values
)
else:
self.shapes_extra = None
|
mrcagney/make_gtfs | make_gtfs/validators.py | check_for_required_columns | python | def check_for_required_columns(problems, table, df):
r = cs.PROTOFEED_REF
req_columns = r.loc[(r['table'] == table) & r['column_required'],
'column'].values
for col in req_columns:
if col not in df.columns:
problems.append(['error', 'Missing column {!s}'.format(col),
table, []])
return problems | Check that the given ProtoFeed table has the required columns.
Parameters
----------
problems : list
A four-tuple containing
1. A problem type (string) equal to ``'error'`` or ``'warning'``;
``'error'`` means the ProtoFeed is violated;
``'warning'`` means there is a problem but it is not a
ProtoFeed violation
2. A message (string) that describes the problem
3. A ProtoFeed table name, e.g. ``'meta'``, in which the problem
occurs
4. A list of rows (integers) of the table's DataFrame where the
problem occurs
table : string
Name of a ProtoFeed table
df : DataFrame
The ProtoFeed table corresponding to ``table``
Returns
-------
list
The ``problems`` list extended as follows.
Check that the DataFrame contains the columns required by
the ProtoFeed spec
and append to the problems list one error for each column
missing. | train | https://github.com/mrcagney/make_gtfs/blob/37b6f88e03bac708c2e85d6f4b6d48a0c92e4a59/make_gtfs/validators.py#L24-L66 | null | """
Validators for ProtoFeeds.
Designed along the lines of gtfstk.validators.py.
"""
import numbers
import pandas as pd
import shapely.geometry as sg
import gtfstk as gt
from . import constants as cs
def valid_speed(x):
    """
    Return ``True`` if ``x`` is a positive number;
    otherwise return ``False``.
    """
    return isinstance(x, numbers.Number) and x > 0
def check_for_invalid_columns(problems, table, df):
    """
    Append to *problems* one warning for every column of *df* that the
    ProtoFeed spec does not recognize for *table*.

    Parameters
    ----------
    problems : list
        List of problem records, each a four-element list
        [type, message, table, rows], where type is ``'error'`` or
        ``'warning'``, message describes the problem, table names the
        ProtoFeed table, and rows lists the offending DataFrame rows.
    table : string
        Name of a ProtoFeed table
    df : DataFrame
        The ProtoFeed table corresponding to ``table``

    Returns
    -------
    list
        The extended ``problems`` list (extra columns yield warnings,
        not errors).
    """
    spec = cs.PROTOFEED_REF
    recognized = spec.loc[spec['table'] == table, 'column'].values
    for column in df.columns:
        if column not in recognized:
            problems.append(['warning',
                             'Unrecognized column {!s}'.format(column),
                             table, []])
    return problems
def check_frequencies(pfeed, *, as_df=False, include_warnings=False):
    """
    Check that ``pfeed.frequency`` follows the ProtoFeed spec.
    Return a list of problems of the form described in
    :func:`gt.check_table`;
    the list will be empty if no problems are found.
    """
    table = 'frequencies'
    problems = []

    # Preliminary checks
    if pfeed.frequencies is None:
        problems.append(['error', 'Missing table', table, []])
    else:
        f = pfeed.frequencies.copy()
        problems = check_for_required_columns(problems, table, f)
    if problems:
        # Missing table or missing required columns: report early, since
        # the per-column checks below need a complete frame.
        return gt.format_problems(problems, as_df=as_df)

    if include_warnings:
        problems = check_for_invalid_columns(problems, table, f)

    # Check route_short_name and route_long_name
    for column in ['route_short_name', 'route_long_name']:
        problems = gt.check_column(problems, table, f, column, gt.valid_str,
                                   column_required=False)
    # At least one of the two name columns must be filled on every row.
    cond = ~(f['route_short_name'].notnull() | f['route_long_name'].notnull())
    problems = gt.check_table(problems, table, f, cond,
                              'route_short_name and route_long_name both empty')

    # Check route_type: must be one of the GTFS route types 0-7.
    v = lambda x: x in range(8)
    problems = gt.check_column(problems, table, f, 'route_type', v)

    # Check service window ID: must exist in the service_windows table.
    problems = gt.check_column_linked_id(problems, table, f,
                                         'service_window_id', pfeed.service_windows)

    # Check direction: 0, 1, or 2 (2 = trips run in both directions).
    v = lambda x: x in range(3)
    problems = gt.check_column(problems, table, f, 'direction', v)

    # Check frequency: vehicles per hour, must be an integer.
    v = lambda x: isinstance(x, int)
    problems = gt.check_column(problems, table, f, 'frequency', v)

    # Check speed: optional positive number (km/h).
    problems = gt.check_column(problems, table, f, 'speed', valid_speed,
                               column_required=False)

    # Check shape ID: must exist in the shapes table.
    problems = gt.check_column_linked_id(problems, table, f, 'shape_id',
                                         pfeed.shapes)

    return gt.format_problems(problems, as_df=as_df)
def check_meta(pfeed, *, as_df=False, include_warnings=False):
    """
    Analog of :func:`check_frequencies` for ``pfeed.meta``
    """
    table = 'meta'
    problems = []

    # Preliminary checks
    if pfeed.meta is None:
        problems.append(['error', 'Missing table', table, []])
    else:
        f = pfeed.meta.copy()
        problems = check_for_required_columns(problems, table, f)
    if problems:
        # Missing table or missing required columns: report early.
        return gt.format_problems(problems, as_df=as_df)

    if include_warnings:
        problems = check_for_invalid_columns(problems, table, f)

    # Meta must be a single-row table.
    if f.shape[0] > 1:
        problems.append(['error', 'Meta must have only one row',
                         table, list(range(1, f.shape[0]))])

    # Check agency_name
    problems = gt.check_column(problems, table, f, 'agency_name', gt.valid_str)

    # Check agency_url
    problems = gt.check_column(problems, table, f, 'agency_url', gt.valid_url)

    # Check agency_timezone
    problems = gt.check_column(problems, table, f, 'agency_timezone',
                               gt.valid_timezone)

    # Check start_date and end_date (YYYYMMDD strings)
    for col in ['start_date', 'end_date']:
        problems = gt.check_column(problems, table, f, col, gt.valid_date)

    # Check default_route_speed: must be a positive number (km/h).
    problems = gt.check_column(problems, table, f, 'default_route_speed',
                               valid_speed)

    return gt.format_problems(problems, as_df=as_df)
def check_service_windows(pfeed, *, as_df=False, include_warnings=False):
    """
    Analog of :func:`check_frequencies` for ``pfeed.service_windows``
    """
    table = 'service_windows'
    problems = []

    # Preliminary checks
    if pfeed.service_windows is None:
        problems.append(['error', 'Missing table', table, []])
    else:
        f = pfeed.service_windows.copy()
        problems = check_for_required_columns(problems, table, f)
    if problems:
        # Missing table or missing required columns: report early.
        return gt.format_problems(problems, as_df=as_df)

    if include_warnings:
        problems = check_for_invalid_columns(problems, table, f)

    # Check service window ID: must be a unique identifier.
    problems = gt.check_column_id(problems, table, f,
                                  'service_window_id')

    # Check start_time and end_time (HH:MM:SS strings)
    for column in ['start_time', 'end_time']:
        problems = gt.check_column(problems, table, f, column, gt.valid_time)

    # Check weekday columns: each day flag must be 0 or 1.
    v = lambda x: x in range(2)
    for col in ['monday', 'tuesday', 'wednesday', 'thursday', 'friday',
                'saturday', 'sunday']:
        problems = gt.check_column(problems, table, f, col, v)

    return gt.format_problems(problems, as_df=as_df)
def check_shapes(pfeed, *, as_df=False, include_warnings=False):
    """
    Analog of :func:`check_frequencies` for ``pfeed.shapes``
    """
    table = 'shapes'
    problems = []

    # Preliminary checks.  A missing shapes table yields no problems,
    # but the (empty) result must still be formatted so the return type
    # matches the other checkers when ``as_df=True``; previously the raw
    # list was returned here regardless of ``as_df``.
    if pfeed.shapes is None:
        return gt.format_problems(problems, as_df=as_df)

    f = pfeed.shapes.copy()
    problems = check_for_required_columns(problems, table, f)
    if problems:
        # Missing required columns: report early.
        return gt.format_problems(problems, as_df=as_df)

    if include_warnings:
        problems = check_for_invalid_columns(problems, table, f)

    # Check shape_id
    problems = gt.check_column(problems, table, f, 'shape_id', gt.valid_str)

    # Check geometry: each shape must be a non-empty LineString.
    v = lambda x: isinstance(x, sg.LineString) and not x.is_empty
    problems = gt.check_column(problems, table, f, 'geometry', v)

    return gt.format_problems(problems, as_df=as_df)
def check_stops(pfeed, *, as_df=False, include_warnings=False):
    """
    Analog of :func:`check_frequencies` for ``pfeed.stops``
    """
    # Stops are optional.  Return an empty, properly formatted problem
    # list when absent; previously this returned None, which crashed
    # ``validate`` when it tried to ``extend`` the problem list with it.
    if pfeed.stops is None:
        return gt.format_problems([], as_df=as_df)

    # Use gtfstk's stop validator via a minimal Feed wrapper.
    stop_times = pd.DataFrame(columns=['stop_id'])
    feed = gt.Feed(stops=pfeed.stops, stop_times=stop_times,
                   dist_units='km')
    return gt.check_stops(feed, as_df=as_df, include_warnings=False)
def validate(pfeed, *, as_df=True, include_warnings=True):
    """
    Check whether the given pfeed satisfies the ProtoFeed spec.

    Parameters
    ----------
    pfeed : ProtoFeed
    as_df : boolean
        If ``True``, then return the resulting report as a DataFrame;
        otherwise return the result as a list
    include_warnings : boolean
        If ``True``, then include problems of types ``'error'`` and
        ``'warning'``; otherwise, only return problems of type
        ``'error'``

    Returns
    -------
    list or DataFrame
        Run all the table-checking functions: :func:`check_agency`,
        :func:`check_calendar`, etc.
        This yields a possibly empty list of items
        [problem type, message, table, rows].
        If ``as_df``, then format the error list as a DataFrame with the
        columns

        - ``'type'``: 'error' or 'warning'; 'error' means the ProtoFeed
          spec is violated; 'warning' means there is a problem but it's
          not a ProtoFeed spec violation
        - ``'message'``: description of the problem
        - ``'table'``: table in which problem occurs, e.g. 'routes'
        - ``'rows'``: rows of the table's DataFrame where problem occurs

    Return early if the pfeed is missing required tables or required
    columns.
    """
    problems = []

    # Check for invalid columns and check the required tables
    checkers = [
        'check_frequencies',
        'check_meta',
        'check_service_windows',
        'check_shapes',
        'check_stops',
    ]
    for checker in checkers:
        # Look each checker up by name in this module's globals and run
        # it, accumulating all problems into one report.
        problems.extend(globals()[checker](pfeed,
                                           include_warnings=include_warnings))

    return gt.format_problems(problems, as_df=as_df)
|
mrcagney/make_gtfs | make_gtfs/validators.py | check_for_invalid_columns | python | def check_for_invalid_columns(problems, table, df):
r = cs.PROTOFEED_REF
valid_columns = r.loc[r['table'] == table, 'column'].values
for col in df.columns:
if col not in valid_columns:
problems.append(['warning',
'Unrecognized column {!s}'.format(col),
table, []])
return problems | Check for invalid columns in the given ProtoFeed DataFrame.
Parameters
----------
problems : list
A four-tuple containing
1. A problem type (string) equal to ``'error'`` or
``'warning'``;
``'error'`` means the ProtoFeed is violated;
``'warning'`` means there is a problem but it is not a
ProtoFeed violation
2. A message (string) that describes the problem
3. A ProtoFeed table name, e.g. ``'meta'``, in which the problem
occurs
4. A list of rows (integers) of the table's DataFrame where the
problem occurs
table : string
Name of a ProtoFeed table
df : DataFrame
The ProtoFeed table corresponding to ``table``
Returns
-------
list
The ``problems`` list extended as follows.
Check whether the DataFrame contains extra columns not in the
ProtoFeed and append to the problems list one warning for each extra
column. | train | https://github.com/mrcagney/make_gtfs/blob/37b6f88e03bac708c2e85d6f4b6d48a0c92e4a59/make_gtfs/validators.py#L68-L110 | null | """
Validators for ProtoFeeds.
Designed along the lines of gtfstk.validators.py.
"""
import numbers
import pandas as pd
import shapely.geometry as sg
import gtfstk as gt
from . import constants as cs
def valid_speed(x):
"""
Return ``True`` if ``x`` is a positive number;
otherwise return ``False``.
"""
if isinstance(x, numbers.Number) and x > 0:
return True
else:
return False
def check_for_required_columns(problems, table, df):
    """
    Append to *problems* one error for each column that the ProtoFeed
    spec requires for *table* but that is missing from *df*.

    Parameters
    ----------
    problems : list
        List of problem records, each a four-element list
        [type, message, table, rows], where type is ``'error'`` or
        ``'warning'``, message describes the problem, table names the
        ProtoFeed table, and rows lists the offending DataFrame rows.
    table : string
        Name of a ProtoFeed table
    df : DataFrame
        The ProtoFeed table corresponding to ``table``

    Returns
    -------
    list
        The extended ``problems`` list (missing required columns yield
        errors).
    """
    spec = cs.PROTOFEED_REF
    required = spec.loc[(spec['table'] == table) & spec['column_required'],
                        'column'].values
    for column in required:
        if column not in df.columns:
            problems.append(['error', 'Missing column {!s}'.format(column),
                             table, []])
    return problems
def check_frequencies(pfeed, *, as_df=False, include_warnings=False):
"""
Check that ``pfeed.frequency`` follows the ProtoFeed spec.
Return a list of problems of the form described in
:func:`gt.check_table`;
the list will be empty if no problems are found.
"""
table = 'frequencies'
problems = []
# Preliminary checks
if pfeed.frequencies is None:
problems.append(['error', 'Missing table', table, []])
else:
f = pfeed.frequencies.copy()
problems = check_for_required_columns(problems, table, f)
if problems:
return gt.format_problems(problems, as_df=as_df)
if include_warnings:
problems = check_for_invalid_columns(problems, table, f)
# Check route_short_name and route_long_name
for column in ['route_short_name', 'route_long_name']:
problems = gt.check_column(problems, table, f, column, gt.valid_str,
column_required=False)
cond = ~(f['route_short_name'].notnull() | f['route_long_name'].notnull())
problems = gt.check_table(problems, table, f, cond,
'route_short_name and route_long_name both empty')
# Check route_type
v = lambda x: x in range(8)
problems = gt.check_column(problems, table, f, 'route_type', v)
# Check service window ID
problems = gt.check_column_linked_id(problems, table, f,
'service_window_id', pfeed.service_windows)
# Check direction
v = lambda x: x in range(3)
problems = gt.check_column(problems, table, f, 'direction', v)
# Check frequency
v = lambda x: isinstance(x, int)
problems = gt.check_column(problems, table, f, 'frequency', v)
# Check speed
problems = gt.check_column(problems, table, f, 'speed', valid_speed,
column_required=False)
# Check shape ID
problems = gt.check_column_linked_id(problems, table, f, 'shape_id',
pfeed.shapes)
return gt.format_problems(problems, as_df=as_df)
def check_meta(pfeed, *, as_df=False, include_warnings=False):
"""
Analog of :func:`check_frequencies` for ``pfeed.meta``
"""
table = 'meta'
problems = []
# Preliminary checks
if pfeed.meta is None:
problems.append(['error', 'Missing table', table, []])
else:
f = pfeed.meta.copy()
problems = check_for_required_columns(problems, table, f)
if problems:
return gt.format_problems(problems, as_df=as_df)
if include_warnings:
problems = check_for_invalid_columns(problems, table, f)
if f.shape[0] > 1:
problems.append(['error', 'Meta must have only one row',
table, list(range(1, f.shape[0]))])
# Check agency_name
problems = gt.check_column(problems, table, f, 'agency_name', gt.valid_str)
# Check agency_url
problems = gt.check_column(problems, table, f, 'agency_url', gt.valid_url)
# Check agency_timezone
problems = gt.check_column(problems, table, f, 'agency_timezone',
gt.valid_timezone)
# Check start_date and end_date
for col in ['start_date', 'end_date']:
problems = gt.check_column(problems, table, f, col, gt.valid_date)
# Check default_route_speed
problems = gt.check_column(problems, table, f, 'default_route_speed',
valid_speed)
return gt.format_problems(problems, as_df=as_df)
def check_service_windows(pfeed, *, as_df=False, include_warnings=False):
"""
Analog of :func:`check_frequencies` for ``pfeed.service_windows``
"""
table = 'service_windows'
problems = []
# Preliminary checks
if pfeed.service_windows is None:
problems.append(['error', 'Missing table', table, []])
else:
f = pfeed.service_windows.copy()
problems = check_for_required_columns(problems, table, f)
if problems:
return gt.format_problems(problems, as_df=as_df)
if include_warnings:
problems = check_for_invalid_columns(problems, table, f)
# Check service window ID
problems = gt.check_column_id(problems, table, f,
'service_window_id')
# Check start_time and end_time
for column in ['start_time', 'end_time']:
problems = gt.check_column(problems, table, f, column, gt.valid_time)
# Check weekday columns
v = lambda x: x in range(2)
for col in ['monday', 'tuesday', 'wednesday', 'thursday', 'friday',
'saturday', 'sunday']:
problems = gt.check_column(problems, table, f, col, v)
return gt.format_problems(problems, as_df=as_df)
def check_shapes(pfeed, *, as_df=False, include_warnings=False):
"""
Analog of :func:`check_frequencies` for ``pfeed.shapes``
"""
table = 'shapes'
problems = []
# Preliminary checks
if pfeed.shapes is None:
return problems
f = pfeed.shapes.copy()
problems = check_for_required_columns(problems, table, f)
if problems:
return gt.format_problems(problems, as_df=as_df)
if include_warnings:
problems = check_for_invalid_columns(problems, table, f)
# Check shape_id
problems = gt.check_column(problems, table, f, 'shape_id', gt.valid_str)
# Check geometry
v = lambda x: isinstance(x, sg.LineString) and not x.is_empty
problems = gt.check_column(problems, table, f, 'geometry', v)
return gt.format_problems(problems, as_df=as_df)
def check_stops(pfeed, *, as_df=False, include_warnings=False):
"""
Analog of :func:`check_frequencies` for ``pfeed.stops``
"""
# Use gtfstk's stop validator
if pfeed.stops is not None:
stop_times = pd.DataFrame(columns=['stop_id'])
feed = gt.Feed(stops=pfeed.stops, stop_times=stop_times,
dist_units='km')
return gt.check_stops(feed, as_df=as_df, include_warnings=False)
def validate(pfeed, *, as_df=True, include_warnings=True):
"""
Check whether the given pfeed satisfies the ProtoFeed spec.
Parameters
----------
pfeed : ProtoFeed
as_df : boolean
If ``True``, then return the resulting report as a DataFrame;
otherwise return the result as a list
include_warnings : boolean
If ``True``, then include problems of types ``'error'`` and
``'warning'``; otherwise, only return problems of type
``'error'``
Returns
-------
list or DataFrame
Run all the table-checking functions: :func:`check_agency`,
:func:`check_calendar`, etc.
This yields a possibly empty list of items
[problem type, message, table, rows].
If ``as_df``, then format the error list as a DataFrame with the
columns
- ``'type'``: 'error' or 'warning'; 'error' means the ProtoFeed
spec is violated; 'warning' means there is a problem but it's
not a ProtoFeed spec violation
- ``'message'``: description of the problem
- ``'table'``: table in which problem occurs, e.g. 'routes'
- ``'rows'``: rows of the table's DataFrame where problem occurs
Return early if the pfeed is missing required tables or required
columns.
"""
problems = []
# Check for invalid columns and check the required tables
checkers = [
'check_frequencies',
'check_meta',
'check_service_windows',
'check_shapes',
'check_stops',
]
for checker in checkers:
problems.extend(globals()[checker](pfeed,
include_warnings=include_warnings))
return gt.format_problems(problems, as_df=as_df)
|
mrcagney/make_gtfs | make_gtfs/validators.py | check_frequencies | python | def check_frequencies(pfeed, *, as_df=False, include_warnings=False):
table = 'frequencies'
problems = []
# Preliminary checks
if pfeed.frequencies is None:
problems.append(['error', 'Missing table', table, []])
else:
f = pfeed.frequencies.copy()
problems = check_for_required_columns(problems, table, f)
if problems:
return gt.format_problems(problems, as_df=as_df)
if include_warnings:
problems = check_for_invalid_columns(problems, table, f)
# Check route_short_name and route_long_name
for column in ['route_short_name', 'route_long_name']:
problems = gt.check_column(problems, table, f, column, gt.valid_str,
column_required=False)
cond = ~(f['route_short_name'].notnull() | f['route_long_name'].notnull())
problems = gt.check_table(problems, table, f, cond,
'route_short_name and route_long_name both empty')
# Check route_type
v = lambda x: x in range(8)
problems = gt.check_column(problems, table, f, 'route_type', v)
# Check service window ID
problems = gt.check_column_linked_id(problems, table, f,
'service_window_id', pfeed.service_windows)
# Check direction
v = lambda x: x in range(3)
problems = gt.check_column(problems, table, f, 'direction', v)
# Check frequency
v = lambda x: isinstance(x, int)
problems = gt.check_column(problems, table, f, 'frequency', v)
# Check speed
problems = gt.check_column(problems, table, f, 'speed', valid_speed,
column_required=False)
# Check shape ID
problems = gt.check_column_linked_id(problems, table, f, 'shape_id',
pfeed.shapes)
return gt.format_problems(problems, as_df=as_df) | Check that ``pfeed.frequency`` follows the ProtoFeed spec.
Return a list of problems of the form described in
:func:`gt.check_table`;
the list will be empty if no problems are found. | train | https://github.com/mrcagney/make_gtfs/blob/37b6f88e03bac708c2e85d6f4b6d48a0c92e4a59/make_gtfs/validators.py#L112-L167 | [
"def check_for_required_columns(problems, table, df):\n \"\"\"\n Check that the given ProtoFeed table has the required columns.\n\n Parameters\n ----------\n problems : list\n A four-tuple containing\n\n 1. A problem type (string) equal to ``'error'`` or ``'warning'``;\n ``'er... | """
Validators for ProtoFeeds.
Designed along the lines of gtfstk.validators.py.
"""
import numbers
import pandas as pd
import shapely.geometry as sg
import gtfstk as gt
from . import constants as cs
def valid_speed(x):
"""
Return ``True`` if ``x`` is a positive number;
otherwise return ``False``.
"""
if isinstance(x, numbers.Number) and x > 0:
return True
else:
return False
def check_for_required_columns(problems, table, df):
"""
Check that the given ProtoFeed table has the required columns.
Parameters
----------
problems : list
A four-tuple containing
1. A problem type (string) equal to ``'error'`` or ``'warning'``;
``'error'`` means the ProtoFeed is violated;
``'warning'`` means there is a problem but it is not a
ProtoFeed violation
2. A message (string) that describes the problem
3. A ProtoFeed table name, e.g. ``'meta'``, in which the problem
occurs
4. A list of rows (integers) of the table's DataFrame where the
problem occurs
table : string
Name of a ProtoFeed table
df : DataFrame
The ProtoFeed table corresponding to ``table``
Returns
-------
list
The ``problems`` list extended as follows.
Check that the DataFrame contains the colums required by
the ProtoFeed spec
and append to the problems list one error for each column
missing.
"""
r = cs.PROTOFEED_REF
req_columns = r.loc[(r['table'] == table) & r['column_required'],
'column'].values
for col in req_columns:
if col not in df.columns:
problems.append(['error', 'Missing column {!s}'.format(col),
table, []])
return problems
def check_for_invalid_columns(problems, table, df):
"""
Check for invalid columns in the given ProtoFeed DataFrame.
Parameters
----------
problems : list
A four-tuple containing
1. A problem type (string) equal to ``'error'`` or
``'warning'``;
``'error'`` means the ProtoFeed is violated;
``'warning'`` means there is a problem but it is not a
ProtoFeed violation
2. A message (string) that describes the problem
3. A ProtoFeed table name, e.g. ``'meta'``, in which the problem
occurs
4. A list of rows (integers) of the table's DataFrame where the
problem occurs
table : string
Name of a ProtoFeed table
df : DataFrame
The ProtoFeed table corresponding to ``table``
Returns
-------
list
The ``problems`` list extended as follows.
Check whether the DataFrame contains extra columns not in the
ProtoFeed and append to the problems list one warning for each extra
column.
"""
r = cs.PROTOFEED_REF
valid_columns = r.loc[r['table'] == table, 'column'].values
for col in df.columns:
if col not in valid_columns:
problems.append(['warning',
'Unrecognized column {!s}'.format(col),
table, []])
return problems
def check_meta(pfeed, *, as_df=False, include_warnings=False):
"""
Analog of :func:`check_frequencies` for ``pfeed.meta``
"""
table = 'meta'
problems = []
# Preliminary checks
if pfeed.meta is None:
problems.append(['error', 'Missing table', table, []])
else:
f = pfeed.meta.copy()
problems = check_for_required_columns(problems, table, f)
if problems:
return gt.format_problems(problems, as_df=as_df)
if include_warnings:
problems = check_for_invalid_columns(problems, table, f)
if f.shape[0] > 1:
problems.append(['error', 'Meta must have only one row',
table, list(range(1, f.shape[0]))])
# Check agency_name
problems = gt.check_column(problems, table, f, 'agency_name', gt.valid_str)
# Check agency_url
problems = gt.check_column(problems, table, f, 'agency_url', gt.valid_url)
# Check agency_timezone
problems = gt.check_column(problems, table, f, 'agency_timezone',
gt.valid_timezone)
# Check start_date and end_date
for col in ['start_date', 'end_date']:
problems = gt.check_column(problems, table, f, col, gt.valid_date)
# Check default_route_speed
problems = gt.check_column(problems, table, f, 'default_route_speed',
valid_speed)
return gt.format_problems(problems, as_df=as_df)
def check_service_windows(pfeed, *, as_df=False, include_warnings=False):
"""
Analog of :func:`check_frequencies` for ``pfeed.service_windows``
"""
table = 'service_windows'
problems = []
# Preliminary checks
if pfeed.service_windows is None:
problems.append(['error', 'Missing table', table, []])
else:
f = pfeed.service_windows.copy()
problems = check_for_required_columns(problems, table, f)
if problems:
return gt.format_problems(problems, as_df=as_df)
if include_warnings:
problems = check_for_invalid_columns(problems, table, f)
# Check service window ID
problems = gt.check_column_id(problems, table, f,
'service_window_id')
# Check start_time and end_time
for column in ['start_time', 'end_time']:
problems = gt.check_column(problems, table, f, column, gt.valid_time)
# Check weekday columns
v = lambda x: x in range(2)
for col in ['monday', 'tuesday', 'wednesday', 'thursday', 'friday',
'saturday', 'sunday']:
problems = gt.check_column(problems, table, f, col, v)
return gt.format_problems(problems, as_df=as_df)
def check_shapes(pfeed, *, as_df=False, include_warnings=False):
"""
Analog of :func:`check_frequencies` for ``pfeed.shapes``
"""
table = 'shapes'
problems = []
# Preliminary checks
if pfeed.shapes is None:
return problems
f = pfeed.shapes.copy()
problems = check_for_required_columns(problems, table, f)
if problems:
return gt.format_problems(problems, as_df=as_df)
if include_warnings:
problems = check_for_invalid_columns(problems, table, f)
# Check shape_id
problems = gt.check_column(problems, table, f, 'shape_id', gt.valid_str)
# Check geometry
v = lambda x: isinstance(x, sg.LineString) and not x.is_empty
problems = gt.check_column(problems, table, f, 'geometry', v)
return gt.format_problems(problems, as_df=as_df)
def check_stops(pfeed, *, as_df=False, include_warnings=False):
"""
Analog of :func:`check_frequencies` for ``pfeed.stops``
"""
# Use gtfstk's stop validator
if pfeed.stops is not None:
stop_times = pd.DataFrame(columns=['stop_id'])
feed = gt.Feed(stops=pfeed.stops, stop_times=stop_times,
dist_units='km')
return gt.check_stops(feed, as_df=as_df, include_warnings=False)
def validate(pfeed, *, as_df=True, include_warnings=True):
"""
Check whether the given pfeed satisfies the ProtoFeed spec.
Parameters
----------
pfeed : ProtoFeed
as_df : boolean
If ``True``, then return the resulting report as a DataFrame;
otherwise return the result as a list
include_warnings : boolean
If ``True``, then include problems of types ``'error'`` and
``'warning'``; otherwise, only return problems of type
``'error'``
Returns
-------
list or DataFrame
Run all the table-checking functions: :func:`check_agency`,
:func:`check_calendar`, etc.
This yields a possibly empty list of items
[problem type, message, table, rows].
If ``as_df``, then format the error list as a DataFrame with the
columns
- ``'type'``: 'error' or 'warning'; 'error' means the ProtoFeed
spec is violated; 'warning' means there is a problem but it's
not a ProtoFeed spec violation
- ``'message'``: description of the problem
- ``'table'``: table in which problem occurs, e.g. 'routes'
- ``'rows'``: rows of the table's DataFrame where problem occurs
Return early if the pfeed is missing required tables or required
columns.
"""
problems = []
# Check for invalid columns and check the required tables
checkers = [
'check_frequencies',
'check_meta',
'check_service_windows',
'check_shapes',
'check_stops',
]
for checker in checkers:
problems.extend(globals()[checker](pfeed,
include_warnings=include_warnings))
return gt.format_problems(problems, as_df=as_df)
|
mrcagney/make_gtfs | make_gtfs/validators.py | check_meta | python | def check_meta(pfeed, *, as_df=False, include_warnings=False):
table = 'meta'
problems = []
# Preliminary checks
if pfeed.meta is None:
problems.append(['error', 'Missing table', table, []])
else:
f = pfeed.meta.copy()
problems = check_for_required_columns(problems, table, f)
if problems:
return gt.format_problems(problems, as_df=as_df)
if include_warnings:
problems = check_for_invalid_columns(problems, table, f)
if f.shape[0] > 1:
problems.append(['error', 'Meta must have only one row',
table, list(range(1, f.shape[0]))])
# Check agency_name
problems = gt.check_column(problems, table, f, 'agency_name', gt.valid_str)
# Check agency_url
problems = gt.check_column(problems, table, f, 'agency_url', gt.valid_url)
# Check agency_timezone
problems = gt.check_column(problems, table, f, 'agency_timezone',
gt.valid_timezone)
# Check start_date and end_date
for col in ['start_date', 'end_date']:
problems = gt.check_column(problems, table, f, col, gt.valid_date)
# Check default_route_speed
problems = gt.check_column(problems, table, f, 'default_route_speed',
valid_speed)
return gt.format_problems(problems, as_df=as_df) | Analog of :func:`check_frequencies` for ``pfeed.meta`` | train | https://github.com/mrcagney/make_gtfs/blob/37b6f88e03bac708c2e85d6f4b6d48a0c92e4a59/make_gtfs/validators.py#L169-L210 | [
"def check_for_required_columns(problems, table, df):\n \"\"\"\n Check that the given ProtoFeed table has the required columns.\n\n Parameters\n ----------\n problems : list\n A four-tuple containing\n\n 1. A problem type (string) equal to ``'error'`` or ``'warning'``;\n ``'er... | """
Validators for ProtoFeeds.
Designed along the lines of gtfstk.validators.py.
"""
import numbers
import pandas as pd
import shapely.geometry as sg
import gtfstk as gt
from . import constants as cs
def valid_speed(x):
"""
Return ``True`` if ``x`` is a positive number;
otherwise return ``False``.
"""
if isinstance(x, numbers.Number) and x > 0:
return True
else:
return False
def check_for_required_columns(problems, table, df):
"""
Check that the given ProtoFeed table has the required columns.
Parameters
----------
problems : list
A four-tuple containing
1. A problem type (string) equal to ``'error'`` or ``'warning'``;
``'error'`` means the ProtoFeed is violated;
``'warning'`` means there is a problem but it is not a
ProtoFeed violation
2. A message (string) that describes the problem
3. A ProtoFeed table name, e.g. ``'meta'``, in which the problem
occurs
4. A list of rows (integers) of the table's DataFrame where the
problem occurs
table : string
Name of a ProtoFeed table
df : DataFrame
The ProtoFeed table corresponding to ``table``
Returns
-------
list
The ``problems`` list extended as follows.
Check that the DataFrame contains the colums required by
the ProtoFeed spec
and append to the problems list one error for each column
missing.
"""
r = cs.PROTOFEED_REF
req_columns = r.loc[(r['table'] == table) & r['column_required'],
'column'].values
for col in req_columns:
if col not in df.columns:
problems.append(['error', 'Missing column {!s}'.format(col),
table, []])
return problems
def check_for_invalid_columns(problems, table, df):
"""
Check for invalid columns in the given ProtoFeed DataFrame.
Parameters
----------
problems : list
A four-tuple containing
1. A problem type (string) equal to ``'error'`` or
``'warning'``;
``'error'`` means the ProtoFeed is violated;
``'warning'`` means there is a problem but it is not a
ProtoFeed violation
2. A message (string) that describes the problem
3. A ProtoFeed table name, e.g. ``'meta'``, in which the problem
occurs
4. A list of rows (integers) of the table's DataFrame where the
problem occurs
table : string
Name of a ProtoFeed table
df : DataFrame
The ProtoFeed table corresponding to ``table``
Returns
-------
list
The ``problems`` list extended as follows.
Check whether the DataFrame contains extra columns not in the
ProtoFeed and append to the problems list one warning for each extra
column.
"""
r = cs.PROTOFEED_REF
valid_columns = r.loc[r['table'] == table, 'column'].values
for col in df.columns:
if col not in valid_columns:
problems.append(['warning',
'Unrecognized column {!s}'.format(col),
table, []])
return problems
def check_frequencies(pfeed, *, as_df=False, include_warnings=False):
"""
Check that ``pfeed.frequency`` follows the ProtoFeed spec.
Return a list of problems of the form described in
:func:`gt.check_table`;
the list will be empty if no problems are found.
"""
table = 'frequencies'
problems = []
# Preliminary checks
if pfeed.frequencies is None:
problems.append(['error', 'Missing table', table, []])
else:
f = pfeed.frequencies.copy()
problems = check_for_required_columns(problems, table, f)
if problems:
return gt.format_problems(problems, as_df=as_df)
if include_warnings:
problems = check_for_invalid_columns(problems, table, f)
# Check route_short_name and route_long_name
for column in ['route_short_name', 'route_long_name']:
problems = gt.check_column(problems, table, f, column, gt.valid_str,
column_required=False)
cond = ~(f['route_short_name'].notnull() | f['route_long_name'].notnull())
problems = gt.check_table(problems, table, f, cond,
'route_short_name and route_long_name both empty')
# Check route_type
v = lambda x: x in range(8)
problems = gt.check_column(problems, table, f, 'route_type', v)
# Check service window ID
problems = gt.check_column_linked_id(problems, table, f,
'service_window_id', pfeed.service_windows)
# Check direction
v = lambda x: x in range(3)
problems = gt.check_column(problems, table, f, 'direction', v)
# Check frequency
v = lambda x: isinstance(x, int)
problems = gt.check_column(problems, table, f, 'frequency', v)
# Check speed
problems = gt.check_column(problems, table, f, 'speed', valid_speed,
column_required=False)
# Check shape ID
problems = gt.check_column_linked_id(problems, table, f, 'shape_id',
pfeed.shapes)
return gt.format_problems(problems, as_df=as_df)
def check_service_windows(pfeed, *, as_df=False, include_warnings=False):
"""
Analog of :func:`check_frequencies` for ``pfeed.service_windows``
"""
table = 'service_windows'
problems = []
# Preliminary checks
if pfeed.service_windows is None:
problems.append(['error', 'Missing table', table, []])
else:
f = pfeed.service_windows.copy()
problems = check_for_required_columns(problems, table, f)
if problems:
return gt.format_problems(problems, as_df=as_df)
if include_warnings:
problems = check_for_invalid_columns(problems, table, f)
# Check service window ID
problems = gt.check_column_id(problems, table, f,
'service_window_id')
# Check start_time and end_time
for column in ['start_time', 'end_time']:
problems = gt.check_column(problems, table, f, column, gt.valid_time)
# Check weekday columns
v = lambda x: x in range(2)
for col in ['monday', 'tuesday', 'wednesday', 'thursday', 'friday',
'saturday', 'sunday']:
problems = gt.check_column(problems, table, f, col, v)
return gt.format_problems(problems, as_df=as_df)
def check_shapes(pfeed, *, as_df=False, include_warnings=False):
"""
Analog of :func:`check_frequencies` for ``pfeed.shapes``
"""
table = 'shapes'
problems = []
# Preliminary checks
if pfeed.shapes is None:
return problems
f = pfeed.shapes.copy()
problems = check_for_required_columns(problems, table, f)
if problems:
return gt.format_problems(problems, as_df=as_df)
if include_warnings:
problems = check_for_invalid_columns(problems, table, f)
# Check shape_id
problems = gt.check_column(problems, table, f, 'shape_id', gt.valid_str)
# Check geometry
v = lambda x: isinstance(x, sg.LineString) and not x.is_empty
problems = gt.check_column(problems, table, f, 'geometry', v)
return gt.format_problems(problems, as_df=as_df)
def check_stops(pfeed, *, as_df=False, include_warnings=False):
"""
Analog of :func:`check_frequencies` for ``pfeed.stops``
"""
# Use gtfstk's stop validator
if pfeed.stops is not None:
stop_times = pd.DataFrame(columns=['stop_id'])
feed = gt.Feed(stops=pfeed.stops, stop_times=stop_times,
dist_units='km')
return gt.check_stops(feed, as_df=as_df, include_warnings=False)
def validate(pfeed, *, as_df=True, include_warnings=True):
"""
Check whether the given pfeed satisfies the ProtoFeed spec.
Parameters
----------
pfeed : ProtoFeed
as_df : boolean
If ``True``, then return the resulting report as a DataFrame;
otherwise return the result as a list
include_warnings : boolean
If ``True``, then include problems of types ``'error'`` and
``'warning'``; otherwise, only return problems of type
``'error'``
Returns
-------
list or DataFrame
Run all the table-checking functions: :func:`check_agency`,
:func:`check_calendar`, etc.
This yields a possibly empty list of items
[problem type, message, table, rows].
If ``as_df``, then format the error list as a DataFrame with the
columns
- ``'type'``: 'error' or 'warning'; 'error' means the ProtoFeed
spec is violated; 'warning' means there is a problem but it's
not a ProtoFeed spec violation
- ``'message'``: description of the problem
- ``'table'``: table in which problem occurs, e.g. 'routes'
- ``'rows'``: rows of the table's DataFrame where problem occurs
Return early if the pfeed is missing required tables or required
columns.
"""
problems = []
# Check for invalid columns and check the required tables
checkers = [
'check_frequencies',
'check_meta',
'check_service_windows',
'check_shapes',
'check_stops',
]
for checker in checkers:
problems.extend(globals()[checker](pfeed,
include_warnings=include_warnings))
return gt.format_problems(problems, as_df=as_df)
|
mrcagney/make_gtfs | make_gtfs/validators.py | check_service_windows | python | def check_service_windows(pfeed, *, as_df=False, include_warnings=False):
table = 'service_windows'
problems = []
# Preliminary checks
if pfeed.service_windows is None:
problems.append(['error', 'Missing table', table, []])
else:
f = pfeed.service_windows.copy()
problems = check_for_required_columns(problems, table, f)
if problems:
return gt.format_problems(problems, as_df=as_df)
if include_warnings:
problems = check_for_invalid_columns(problems, table, f)
# Check service window ID
problems = gt.check_column_id(problems, table, f,
'service_window_id')
# Check start_time and end_time
for column in ['start_time', 'end_time']:
problems = gt.check_column(problems, table, f, column, gt.valid_time)
# Check weekday columns
v = lambda x: x in range(2)
for col in ['monday', 'tuesday', 'wednesday', 'thursday', 'friday',
'saturday', 'sunday']:
problems = gt.check_column(problems, table, f, col, v)
return gt.format_problems(problems, as_df=as_df) | Analog of :func:`check_frequencies` for ``pfeed.service_windows`` | train | https://github.com/mrcagney/make_gtfs/blob/37b6f88e03bac708c2e85d6f4b6d48a0c92e4a59/make_gtfs/validators.py#L212-L245 | [
"def check_for_required_columns(problems, table, df):\n \"\"\"\n Check that the given ProtoFeed table has the required columns.\n\n Parameters\n ----------\n problems : list\n A four-tuple containing\n\n 1. A problem type (string) equal to ``'error'`` or ``'warning'``;\n ``'er... | """
Validators for ProtoFeeds.
Designed along the lines of gtfstk.validators.py.
"""
import numbers
import pandas as pd
import shapely.geometry as sg
import gtfstk as gt
from . import constants as cs
def valid_speed(x):
"""
Return ``True`` if ``x`` is a positive number;
otherwise return ``False``.
"""
if isinstance(x, numbers.Number) and x > 0:
return True
else:
return False
def check_for_required_columns(problems, table, df):
"""
Check that the given ProtoFeed table has the required columns.
Parameters
----------
problems : list
A four-tuple containing
1. A problem type (string) equal to ``'error'`` or ``'warning'``;
``'error'`` means the ProtoFeed is violated;
``'warning'`` means there is a problem but it is not a
ProtoFeed violation
2. A message (string) that describes the problem
3. A ProtoFeed table name, e.g. ``'meta'``, in which the problem
occurs
4. A list of rows (integers) of the table's DataFrame where the
problem occurs
table : string
Name of a ProtoFeed table
df : DataFrame
The ProtoFeed table corresponding to ``table``
Returns
-------
list
The ``problems`` list extended as follows.
Check that the DataFrame contains the colums required by
the ProtoFeed spec
and append to the problems list one error for each column
missing.
"""
r = cs.PROTOFEED_REF
req_columns = r.loc[(r['table'] == table) & r['column_required'],
'column'].values
for col in req_columns:
if col not in df.columns:
problems.append(['error', 'Missing column {!s}'.format(col),
table, []])
return problems
def check_for_invalid_columns(problems, table, df):
"""
Check for invalid columns in the given ProtoFeed DataFrame.
Parameters
----------
problems : list
A four-tuple containing
1. A problem type (string) equal to ``'error'`` or
``'warning'``;
``'error'`` means the ProtoFeed is violated;
``'warning'`` means there is a problem but it is not a
ProtoFeed violation
2. A message (string) that describes the problem
3. A ProtoFeed table name, e.g. ``'meta'``, in which the problem
occurs
4. A list of rows (integers) of the table's DataFrame where the
problem occurs
table : string
Name of a ProtoFeed table
df : DataFrame
The ProtoFeed table corresponding to ``table``
Returns
-------
list
The ``problems`` list extended as follows.
Check whether the DataFrame contains extra columns not in the
ProtoFeed and append to the problems list one warning for each extra
column.
"""
r = cs.PROTOFEED_REF
valid_columns = r.loc[r['table'] == table, 'column'].values
for col in df.columns:
if col not in valid_columns:
problems.append(['warning',
'Unrecognized column {!s}'.format(col),
table, []])
return problems
def check_frequencies(pfeed, *, as_df=False, include_warnings=False):
"""
Check that ``pfeed.frequency`` follows the ProtoFeed spec.
Return a list of problems of the form described in
:func:`gt.check_table`;
the list will be empty if no problems are found.
"""
table = 'frequencies'
problems = []
# Preliminary checks
if pfeed.frequencies is None:
problems.append(['error', 'Missing table', table, []])
else:
f = pfeed.frequencies.copy()
problems = check_for_required_columns(problems, table, f)
if problems:
return gt.format_problems(problems, as_df=as_df)
if include_warnings:
problems = check_for_invalid_columns(problems, table, f)
# Check route_short_name and route_long_name
for column in ['route_short_name', 'route_long_name']:
problems = gt.check_column(problems, table, f, column, gt.valid_str,
column_required=False)
cond = ~(f['route_short_name'].notnull() | f['route_long_name'].notnull())
problems = gt.check_table(problems, table, f, cond,
'route_short_name and route_long_name both empty')
# Check route_type
v = lambda x: x in range(8)
problems = gt.check_column(problems, table, f, 'route_type', v)
# Check service window ID
problems = gt.check_column_linked_id(problems, table, f,
'service_window_id', pfeed.service_windows)
# Check direction
v = lambda x: x in range(3)
problems = gt.check_column(problems, table, f, 'direction', v)
# Check frequency
v = lambda x: isinstance(x, int)
problems = gt.check_column(problems, table, f, 'frequency', v)
# Check speed
problems = gt.check_column(problems, table, f, 'speed', valid_speed,
column_required=False)
# Check shape ID
problems = gt.check_column_linked_id(problems, table, f, 'shape_id',
pfeed.shapes)
return gt.format_problems(problems, as_df=as_df)
def check_meta(pfeed, *, as_df=False, include_warnings=False):
"""
Analog of :func:`check_frequencies` for ``pfeed.meta``
"""
table = 'meta'
problems = []
# Preliminary checks
if pfeed.meta is None:
problems.append(['error', 'Missing table', table, []])
else:
f = pfeed.meta.copy()
problems = check_for_required_columns(problems, table, f)
if problems:
return gt.format_problems(problems, as_df=as_df)
if include_warnings:
problems = check_for_invalid_columns(problems, table, f)
if f.shape[0] > 1:
problems.append(['error', 'Meta must have only one row',
table, list(range(1, f.shape[0]))])
# Check agency_name
problems = gt.check_column(problems, table, f, 'agency_name', gt.valid_str)
# Check agency_url
problems = gt.check_column(problems, table, f, 'agency_url', gt.valid_url)
# Check agency_timezone
problems = gt.check_column(problems, table, f, 'agency_timezone',
gt.valid_timezone)
# Check start_date and end_date
for col in ['start_date', 'end_date']:
problems = gt.check_column(problems, table, f, col, gt.valid_date)
# Check default_route_speed
problems = gt.check_column(problems, table, f, 'default_route_speed',
valid_speed)
return gt.format_problems(problems, as_df=as_df)
def check_shapes(pfeed, *, as_df=False, include_warnings=False):
"""
Analog of :func:`check_frequencies` for ``pfeed.shapes``
"""
table = 'shapes'
problems = []
# Preliminary checks
if pfeed.shapes is None:
return problems
f = pfeed.shapes.copy()
problems = check_for_required_columns(problems, table, f)
if problems:
return gt.format_problems(problems, as_df=as_df)
if include_warnings:
problems = check_for_invalid_columns(problems, table, f)
# Check shape_id
problems = gt.check_column(problems, table, f, 'shape_id', gt.valid_str)
# Check geometry
v = lambda x: isinstance(x, sg.LineString) and not x.is_empty
problems = gt.check_column(problems, table, f, 'geometry', v)
return gt.format_problems(problems, as_df=as_df)
def check_stops(pfeed, *, as_df=False, include_warnings=False):
"""
Analog of :func:`check_frequencies` for ``pfeed.stops``
"""
# Use gtfstk's stop validator
if pfeed.stops is not None:
stop_times = pd.DataFrame(columns=['stop_id'])
feed = gt.Feed(stops=pfeed.stops, stop_times=stop_times,
dist_units='km')
return gt.check_stops(feed, as_df=as_df, include_warnings=False)
def validate(pfeed, *, as_df=True, include_warnings=True):
    """
    Check whether the given pfeed satisfies the ProtoFeed spec.

    Parameters
    ----------
    pfeed : ProtoFeed
    as_df : boolean
        If ``True``, then return the resulting report as a DataFrame;
        otherwise return the result as a list
    include_warnings : boolean
        If ``True``, then include problems of types ``'error'`` and
        ``'warning'``; otherwise, only return problems of type
        ``'error'``

    Returns
    -------
    list or DataFrame
        Run all the table-checking functions: :func:`check_frequencies`,
        :func:`check_meta`, etc.
        This yields a possibly empty list of items
        [problem type, message, table, rows].
        If ``as_df``, then format the error list as a DataFrame with the
        columns

        - ``'type'``: 'error' or 'warning'; 'error' means the ProtoFeed
          spec is violated; 'warning' means there is a problem but it's
          not a ProtoFeed spec violation
        - ``'message'``: description of the problem
        - ``'table'``: table in which problem occurs, e.g. 'routes'
        - ``'rows'``: rows of the table's DataFrame where problem occurs

    Return early if the pfeed is missing required tables or required
    columns.
    """
    # Run every table checker and pool their findings.  Each checker
    # returns a plain problems list when called with the default
    # as_df=False.
    checkers = [
        check_frequencies,
        check_meta,
        check_service_windows,
        check_shapes,
        check_stops,
    ]
    problems = []
    for check in checkers:
        problems.extend(check(pfeed, include_warnings=include_warnings))
    return gt.format_problems(problems, as_df=as_df)
|
mrcagney/make_gtfs | make_gtfs/validators.py | check_shapes | python | def check_shapes(pfeed, *, as_df=False, include_warnings=False):
table = 'shapes'
problems = []
# Preliminary checks
if pfeed.shapes is None:
return problems
f = pfeed.shapes.copy()
problems = check_for_required_columns(problems, table, f)
if problems:
return gt.format_problems(problems, as_df=as_df)
if include_warnings:
problems = check_for_invalid_columns(problems, table, f)
# Check shape_id
problems = gt.check_column(problems, table, f, 'shape_id', gt.valid_str)
# Check geometry
v = lambda x: isinstance(x, sg.LineString) and not x.is_empty
problems = gt.check_column(problems, table, f, 'geometry', v)
return gt.format_problems(problems, as_df=as_df) | Analog of :func:`check_frequencies` for ``pfeed.shapes`` | train | https://github.com/mrcagney/make_gtfs/blob/37b6f88e03bac708c2e85d6f4b6d48a0c92e4a59/make_gtfs/validators.py#L247-L273 | [
"def check_for_required_columns(problems, table, df):\n \"\"\"\n Check that the given ProtoFeed table has the required columns.\n\n Parameters\n ----------\n problems : list\n A four-tuple containing\n\n 1. A problem type (string) equal to ``'error'`` or ``'warning'``;\n ``'er... | """
Validators for ProtoFeeds.
Designed along the lines of gtfstk.validators.py.
"""
import numbers
import pandas as pd
import shapely.geometry as sg
import gtfstk as gt
from . import constants as cs
def valid_speed(x):
    """
    Return ``True`` if ``x`` is a positive number and ``False``
    otherwise.
    """
    # Non-numeric inputs short-circuit to False before the comparison.
    return isinstance(x, numbers.Number) and x > 0
def check_for_required_columns(problems, table, df):
    """
    Check that the given ProtoFeed table has the required columns.

    Parameters
    ----------
    problems : list
        A list of four-tuples containing

        1. A problem type (string) equal to ``'error'`` or ``'warning'``;
           ``'error'`` means the ProtoFeed is violated;
           ``'warning'`` means there is a problem but it is not a
           ProtoFeed violation
        2. A message (string) that describes the problem
        3. A ProtoFeed table name, e.g. ``'meta'``, in which the problem
           occurs
        4. A list of rows (integers) of the table's DataFrame where the
           problem occurs
    table : string
        Name of a ProtoFeed table
    df : DataFrame
        The ProtoFeed table corresponding to ``table``

    Returns
    -------
    list
        The ``problems`` list extended with one error for each column
        that the ProtoFeed spec requires for ``table`` but that ``df``
        lacks.
    """
    spec = cs.PROTOFEED_REF
    # Columns the spec marks as required for this table
    required = spec.loc[(spec['table'] == table) & spec['column_required'],
      'column'].values
    for col in (c for c in required if c not in df.columns):
        problems.append(['error', 'Missing column {!s}'.format(col),
          table, []])
    return problems
def check_for_invalid_columns(problems, table, df):
    """
    Check for invalid columns in the given ProtoFeed DataFrame.

    Parameters
    ----------
    problems : list
        A list of four-tuples containing

        1. A problem type (string) equal to ``'error'`` or ``'warning'``;
           ``'error'`` means the ProtoFeed is violated;
           ``'warning'`` means there is a problem but it is not a
           ProtoFeed violation
        2. A message (string) that describes the problem
        3. A ProtoFeed table name, e.g. ``'meta'``, in which the problem
           occurs
        4. A list of rows (integers) of the table's DataFrame where the
           problem occurs
    table : string
        Name of a ProtoFeed table
    df : DataFrame
        The ProtoFeed table corresponding to ``table``

    Returns
    -------
    list
        The ``problems`` list extended with one warning for each column
        of ``df`` that the ProtoFeed spec does not recognize for
        ``table``.
    """
    spec = cs.PROTOFEED_REF
    # All column names the spec knows about for this table
    recognized = set(spec.loc[spec['table'] == table, 'column'].values)
    for col in df.columns:
        if col in recognized:
            continue
        problems.append(['warning',
          'Unrecognized column {!s}'.format(col),
          table, []])
    return problems
def check_frequencies(pfeed, *, as_df=False, include_warnings=False):
    """
    Check that ``pfeed.frequencies`` follows the ProtoFeed spec.
    Return a list of problems of the form described in
    :func:`gt.check_table`; the list will be empty if no problems
    are found.
    """
    table = 'frequencies'
    problems = []

    # Preliminary checks; bail out early if the table or any required
    # column is missing.
    if pfeed.frequencies is None:
        problems.append(['error', 'Missing table', table, []])
    else:
        f = pfeed.frequencies.copy()
        problems = check_for_required_columns(problems, table, f)
    if problems:
        return gt.format_problems(problems, as_df=as_df)

    if include_warnings:
        problems = check_for_invalid_columns(problems, table, f)

    # Route names: each, when present, must be a valid string, and at
    # least one of the two must be given on every row
    for name_col in ['route_short_name', 'route_long_name']:
        problems = gt.check_column(problems, table, f, name_col,
          gt.valid_str, column_required=False)
    both_empty = ~(f['route_short_name'].notnull()
      | f['route_long_name'].notnull())
    problems = gt.check_table(problems, table, f, both_empty,
      'route_short_name and route_long_name both empty')

    # route_type must be an integer in 0..7
    problems = gt.check_column(problems, table, f, 'route_type',
      lambda x: x in range(8))

    # service_window_id must link to the service windows table
    problems = gt.check_column_linked_id(problems, table, f,
      'service_window_id', pfeed.service_windows)

    # direction must be 0, 1, or 2
    problems = gt.check_column(problems, table, f, 'direction',
      lambda x: x in range(3))

    # frequency must be an integer
    problems = gt.check_column(problems, table, f, 'frequency',
      lambda x: isinstance(x, int))

    # speed, when present, must be a positive number
    problems = gt.check_column(problems, table, f, 'speed', valid_speed,
      column_required=False)

    # shape_id must link to the shapes table
    problems = gt.check_column_linked_id(problems, table, f, 'shape_id',
      pfeed.shapes)

    return gt.format_problems(problems, as_df=as_df)
def check_meta(pfeed, *, as_df=False, include_warnings=False):
    """
    Analog of :func:`check_frequencies` for ``pfeed.meta``.
    """
    table = 'meta'
    problems = []

    # Preliminary checks; bail out early if the table or any required
    # column is missing.
    if pfeed.meta is None:
        problems.append(['error', 'Missing table', table, []])
    else:
        f = pfeed.meta.copy()
        problems = check_for_required_columns(problems, table, f)
    if problems:
        return gt.format_problems(problems, as_df=as_df)

    if include_warnings:
        problems = check_for_invalid_columns(problems, table, f)

    # Meta is a single-row table; flag every extra row
    if f.shape[0] > 1:
        problems.append(['error', 'Meta must have only one row',
          table, list(range(1, f.shape[0]))])

    # Agency fields
    problems = gt.check_column(problems, table, f, 'agency_name',
      gt.valid_str)
    problems = gt.check_column(problems, table, f, 'agency_url',
      gt.valid_url)
    problems = gt.check_column(problems, table, f, 'agency_timezone',
      gt.valid_timezone)

    # Feed date range
    for date_col in ['start_date', 'end_date']:
        problems = gt.check_column(problems, table, f, date_col,
          gt.valid_date)

    # Fallback route speed must be a positive number
    problems = gt.check_column(problems, table, f, 'default_route_speed',
      valid_speed)

    return gt.format_problems(problems, as_df=as_df)
def check_service_windows(pfeed, *, as_df=False, include_warnings=False):
    """
    Analog of :func:`check_frequencies` for ``pfeed.service_windows``.
    """
    table = 'service_windows'
    problems = []

    # Preliminary checks; bail out early if the table or any required
    # column is missing.
    if pfeed.service_windows is None:
        problems.append(['error', 'Missing table', table, []])
    else:
        f = pfeed.service_windows.copy()
        problems = check_for_required_columns(problems, table, f)
    if problems:
        return gt.format_problems(problems, as_df=as_df)

    if include_warnings:
        problems = check_for_invalid_columns(problems, table, f)

    # Window IDs must be valid and unique
    problems = gt.check_column_id(problems, table, f,
      'service_window_id')

    # Window boundaries must be valid time strings
    for time_col in ('start_time', 'end_time'):
        problems = gt.check_column(problems, table, f, time_col,
          gt.valid_time)

    # Each weekday flag must be 0 or 1
    is_binary = lambda x: x in range(2)
    for day in ('monday', 'tuesday', 'wednesday', 'thursday', 'friday',
      'saturday', 'sunday'):
        problems = gt.check_column(problems, table, f, day, is_binary)

    return gt.format_problems(problems, as_df=as_df)
def check_stops(pfeed, *, as_df=False, include_warnings=False):
"""
Analog of :func:`check_frequencies` for ``pfeed.stops``
"""
# Use gtfstk's stop validator
if pfeed.stops is not None:
stop_times = pd.DataFrame(columns=['stop_id'])
feed = gt.Feed(stops=pfeed.stops, stop_times=stop_times,
dist_units='km')
return gt.check_stops(feed, as_df=as_df, include_warnings=False)
def validate(pfeed, *, as_df=True, include_warnings=True):
"""
Check whether the given pfeed satisfies the ProtoFeed spec.
Parameters
----------
pfeed : ProtoFeed
as_df : boolean
If ``True``, then return the resulting report as a DataFrame;
otherwise return the result as a list
include_warnings : boolean
If ``True``, then include problems of types ``'error'`` and
``'warning'``; otherwise, only return problems of type
``'error'``
Returns
-------
list or DataFrame
Run all the table-checking functions: :func:`check_agency`,
:func:`check_calendar`, etc.
This yields a possibly empty list of items
[problem type, message, table, rows].
If ``as_df``, then format the error list as a DataFrame with the
columns
- ``'type'``: 'error' or 'warning'; 'error' means the ProtoFeed
spec is violated; 'warning' means there is a problem but it's
not a ProtoFeed spec violation
- ``'message'``: description of the problem
- ``'table'``: table in which problem occurs, e.g. 'routes'
- ``'rows'``: rows of the table's DataFrame where problem occurs
Return early if the pfeed is missing required tables or required
columns.
"""
problems = []
# Check for invalid columns and check the required tables
checkers = [
'check_frequencies',
'check_meta',
'check_service_windows',
'check_shapes',
'check_stops',
]
for checker in checkers:
problems.extend(globals()[checker](pfeed,
include_warnings=include_warnings))
return gt.format_problems(problems, as_df=as_df)
|
mrcagney/make_gtfs | make_gtfs/validators.py | check_stops | python | def check_stops(pfeed, *, as_df=False, include_warnings=False):
# Use gtfstk's stop validator
if pfeed.stops is not None:
stop_times = pd.DataFrame(columns=['stop_id'])
feed = gt.Feed(stops=pfeed.stops, stop_times=stop_times,
dist_units='km')
return gt.check_stops(feed, as_df=as_df, include_warnings=False) | Analog of :func:`check_frequencies` for ``pfeed.stops`` | train | https://github.com/mrcagney/make_gtfs/blob/37b6f88e03bac708c2e85d6f4b6d48a0c92e4a59/make_gtfs/validators.py#L275-L284 | null | """
Validators for ProtoFeeds.
Designed along the lines of gtfstk.validators.py.
"""
import numbers
import pandas as pd
import shapely.geometry as sg
import gtfstk as gt
from . import constants as cs
def valid_speed(x):
"""
Return ``True`` if ``x`` is a positive number;
otherwise return ``False``.
"""
if isinstance(x, numbers.Number) and x > 0:
return True
else:
return False
def check_for_required_columns(problems, table, df):
"""
Check that the given ProtoFeed table has the required columns.
Parameters
----------
problems : list
A four-tuple containing
1. A problem type (string) equal to ``'error'`` or ``'warning'``;
``'error'`` means the ProtoFeed is violated;
``'warning'`` means there is a problem but it is not a
ProtoFeed violation
2. A message (string) that describes the problem
3. A ProtoFeed table name, e.g. ``'meta'``, in which the problem
occurs
4. A list of rows (integers) of the table's DataFrame where the
problem occurs
table : string
Name of a ProtoFeed table
df : DataFrame
The ProtoFeed table corresponding to ``table``
Returns
-------
list
The ``problems`` list extended as follows.
Check that the DataFrame contains the colums required by
the ProtoFeed spec
and append to the problems list one error for each column
missing.
"""
r = cs.PROTOFEED_REF
req_columns = r.loc[(r['table'] == table) & r['column_required'],
'column'].values
for col in req_columns:
if col not in df.columns:
problems.append(['error', 'Missing column {!s}'.format(col),
table, []])
return problems
def check_for_invalid_columns(problems, table, df):
"""
Check for invalid columns in the given ProtoFeed DataFrame.
Parameters
----------
problems : list
A four-tuple containing
1. A problem type (string) equal to ``'error'`` or
``'warning'``;
``'error'`` means the ProtoFeed is violated;
``'warning'`` means there is a problem but it is not a
ProtoFeed violation
2. A message (string) that describes the problem
3. A ProtoFeed table name, e.g. ``'meta'``, in which the problem
occurs
4. A list of rows (integers) of the table's DataFrame where the
problem occurs
table : string
Name of a ProtoFeed table
df : DataFrame
The ProtoFeed table corresponding to ``table``
Returns
-------
list
The ``problems`` list extended as follows.
Check whether the DataFrame contains extra columns not in the
ProtoFeed and append to the problems list one warning for each extra
column.
"""
r = cs.PROTOFEED_REF
valid_columns = r.loc[r['table'] == table, 'column'].values
for col in df.columns:
if col not in valid_columns:
problems.append(['warning',
'Unrecognized column {!s}'.format(col),
table, []])
return problems
def check_frequencies(pfeed, *, as_df=False, include_warnings=False):
"""
Check that ``pfeed.frequency`` follows the ProtoFeed spec.
Return a list of problems of the form described in
:func:`gt.check_table`;
the list will be empty if no problems are found.
"""
table = 'frequencies'
problems = []
# Preliminary checks
if pfeed.frequencies is None:
problems.append(['error', 'Missing table', table, []])
else:
f = pfeed.frequencies.copy()
problems = check_for_required_columns(problems, table, f)
if problems:
return gt.format_problems(problems, as_df=as_df)
if include_warnings:
problems = check_for_invalid_columns(problems, table, f)
# Check route_short_name and route_long_name
for column in ['route_short_name', 'route_long_name']:
problems = gt.check_column(problems, table, f, column, gt.valid_str,
column_required=False)
cond = ~(f['route_short_name'].notnull() | f['route_long_name'].notnull())
problems = gt.check_table(problems, table, f, cond,
'route_short_name and route_long_name both empty')
# Check route_type
v = lambda x: x in range(8)
problems = gt.check_column(problems, table, f, 'route_type', v)
# Check service window ID
problems = gt.check_column_linked_id(problems, table, f,
'service_window_id', pfeed.service_windows)
# Check direction
v = lambda x: x in range(3)
problems = gt.check_column(problems, table, f, 'direction', v)
# Check frequency
v = lambda x: isinstance(x, int)
problems = gt.check_column(problems, table, f, 'frequency', v)
# Check speed
problems = gt.check_column(problems, table, f, 'speed', valid_speed,
column_required=False)
# Check shape ID
problems = gt.check_column_linked_id(problems, table, f, 'shape_id',
pfeed.shapes)
return gt.format_problems(problems, as_df=as_df)
def check_meta(pfeed, *, as_df=False, include_warnings=False):
"""
Analog of :func:`check_frequencies` for ``pfeed.meta``
"""
table = 'meta'
problems = []
# Preliminary checks
if pfeed.meta is None:
problems.append(['error', 'Missing table', table, []])
else:
f = pfeed.meta.copy()
problems = check_for_required_columns(problems, table, f)
if problems:
return gt.format_problems(problems, as_df=as_df)
if include_warnings:
problems = check_for_invalid_columns(problems, table, f)
if f.shape[0] > 1:
problems.append(['error', 'Meta must have only one row',
table, list(range(1, f.shape[0]))])
# Check agency_name
problems = gt.check_column(problems, table, f, 'agency_name', gt.valid_str)
# Check agency_url
problems = gt.check_column(problems, table, f, 'agency_url', gt.valid_url)
# Check agency_timezone
problems = gt.check_column(problems, table, f, 'agency_timezone',
gt.valid_timezone)
# Check start_date and end_date
for col in ['start_date', 'end_date']:
problems = gt.check_column(problems, table, f, col, gt.valid_date)
# Check default_route_speed
problems = gt.check_column(problems, table, f, 'default_route_speed',
valid_speed)
return gt.format_problems(problems, as_df=as_df)
def check_service_windows(pfeed, *, as_df=False, include_warnings=False):
"""
Analog of :func:`check_frequencies` for ``pfeed.service_windows``
"""
table = 'service_windows'
problems = []
# Preliminary checks
if pfeed.service_windows is None:
problems.append(['error', 'Missing table', table, []])
else:
f = pfeed.service_windows.copy()
problems = check_for_required_columns(problems, table, f)
if problems:
return gt.format_problems(problems, as_df=as_df)
if include_warnings:
problems = check_for_invalid_columns(problems, table, f)
# Check service window ID
problems = gt.check_column_id(problems, table, f,
'service_window_id')
# Check start_time and end_time
for column in ['start_time', 'end_time']:
problems = gt.check_column(problems, table, f, column, gt.valid_time)
# Check weekday columns
v = lambda x: x in range(2)
for col in ['monday', 'tuesday', 'wednesday', 'thursday', 'friday',
'saturday', 'sunday']:
problems = gt.check_column(problems, table, f, col, v)
return gt.format_problems(problems, as_df=as_df)
def check_shapes(pfeed, *, as_df=False, include_warnings=False):
"""
Analog of :func:`check_frequencies` for ``pfeed.shapes``
"""
table = 'shapes'
problems = []
# Preliminary checks
if pfeed.shapes is None:
return problems
f = pfeed.shapes.copy()
problems = check_for_required_columns(problems, table, f)
if problems:
return gt.format_problems(problems, as_df=as_df)
if include_warnings:
problems = check_for_invalid_columns(problems, table, f)
# Check shape_id
problems = gt.check_column(problems, table, f, 'shape_id', gt.valid_str)
# Check geometry
v = lambda x: isinstance(x, sg.LineString) and not x.is_empty
problems = gt.check_column(problems, table, f, 'geometry', v)
return gt.format_problems(problems, as_df=as_df)
def validate(pfeed, *, as_df=True, include_warnings=True):
"""
Check whether the given pfeed satisfies the ProtoFeed spec.
Parameters
----------
pfeed : ProtoFeed
as_df : boolean
If ``True``, then return the resulting report as a DataFrame;
otherwise return the result as a list
include_warnings : boolean
If ``True``, then include problems of types ``'error'`` and
``'warning'``; otherwise, only return problems of type
``'error'``
Returns
-------
list or DataFrame
Run all the table-checking functions: :func:`check_agency`,
:func:`check_calendar`, etc.
This yields a possibly empty list of items
[problem type, message, table, rows].
If ``as_df``, then format the error list as a DataFrame with the
columns
- ``'type'``: 'error' or 'warning'; 'error' means the ProtoFeed
spec is violated; 'warning' means there is a problem but it's
not a ProtoFeed spec violation
- ``'message'``: description of the problem
- ``'table'``: table in which problem occurs, e.g. 'routes'
- ``'rows'``: rows of the table's DataFrame where problem occurs
Return early if the pfeed is missing required tables or required
columns.
"""
problems = []
# Check for invalid columns and check the required tables
checkers = [
'check_frequencies',
'check_meta',
'check_service_windows',
'check_shapes',
'check_stops',
]
for checker in checkers:
problems.extend(globals()[checker](pfeed,
include_warnings=include_warnings))
return gt.format_problems(problems, as_df=as_df)
|
mrcagney/make_gtfs | make_gtfs/validators.py | validate | python | def validate(pfeed, *, as_df=True, include_warnings=True):
problems = []
# Check for invalid columns and check the required tables
checkers = [
'check_frequencies',
'check_meta',
'check_service_windows',
'check_shapes',
'check_stops',
]
for checker in checkers:
problems.extend(globals()[checker](pfeed,
include_warnings=include_warnings))
return gt.format_problems(problems, as_df=as_df) | Check whether the given pfeed satisfies the ProtoFeed spec.
Parameters
----------
pfeed : ProtoFeed
as_df : boolean
If ``True``, then return the resulting report as a DataFrame;
otherwise return the result as a list
include_warnings : boolean
If ``True``, then include problems of types ``'error'`` and
``'warning'``; otherwise, only return problems of type
``'error'``
Returns
-------
list or DataFrame
Run all the table-checking functions: :func:`check_agency`,
:func:`check_calendar`, etc.
This yields a possibly empty list of items
[problem type, message, table, rows].
If ``as_df``, then format the error list as a DataFrame with the
columns
- ``'type'``: 'error' or 'warning'; 'error' means the ProtoFeed
spec is violated; 'warning' means there is a problem but it's
not a ProtoFeed spec violation
- ``'message'``: description of the problem
- ``'table'``: table in which problem occurs, e.g. 'routes'
- ``'rows'``: rows of the table's DataFrame where problem occurs
Return early if the pfeed is missing required tables or required
columns. | train | https://github.com/mrcagney/make_gtfs/blob/37b6f88e03bac708c2e85d6f4b6d48a0c92e4a59/make_gtfs/validators.py#L286-L336 | null | """
Validators for ProtoFeeds.
Designed along the lines of gtfstk.validators.py.
"""
import numbers
import pandas as pd
import shapely.geometry as sg
import gtfstk as gt
from . import constants as cs
def valid_speed(x):
"""
Return ``True`` if ``x`` is a positive number;
otherwise return ``False``.
"""
if isinstance(x, numbers.Number) and x > 0:
return True
else:
return False
def check_for_required_columns(problems, table, df):
"""
Check that the given ProtoFeed table has the required columns.
Parameters
----------
problems : list
A four-tuple containing
1. A problem type (string) equal to ``'error'`` or ``'warning'``;
``'error'`` means the ProtoFeed is violated;
``'warning'`` means there is a problem but it is not a
ProtoFeed violation
2. A message (string) that describes the problem
3. A ProtoFeed table name, e.g. ``'meta'``, in which the problem
occurs
4. A list of rows (integers) of the table's DataFrame where the
problem occurs
table : string
Name of a ProtoFeed table
df : DataFrame
The ProtoFeed table corresponding to ``table``
Returns
-------
list
The ``problems`` list extended as follows.
Check that the DataFrame contains the colums required by
the ProtoFeed spec
and append to the problems list one error for each column
missing.
"""
r = cs.PROTOFEED_REF
req_columns = r.loc[(r['table'] == table) & r['column_required'],
'column'].values
for col in req_columns:
if col not in df.columns:
problems.append(['error', 'Missing column {!s}'.format(col),
table, []])
return problems
def check_for_invalid_columns(problems, table, df):
"""
Check for invalid columns in the given ProtoFeed DataFrame.
Parameters
----------
problems : list
A four-tuple containing
1. A problem type (string) equal to ``'error'`` or
``'warning'``;
``'error'`` means the ProtoFeed is violated;
``'warning'`` means there is a problem but it is not a
ProtoFeed violation
2. A message (string) that describes the problem
3. A ProtoFeed table name, e.g. ``'meta'``, in which the problem
occurs
4. A list of rows (integers) of the table's DataFrame where the
problem occurs
table : string
Name of a ProtoFeed table
df : DataFrame
The ProtoFeed table corresponding to ``table``
Returns
-------
list
The ``problems`` list extended as follows.
Check whether the DataFrame contains extra columns not in the
ProtoFeed and append to the problems list one warning for each extra
column.
"""
r = cs.PROTOFEED_REF
valid_columns = r.loc[r['table'] == table, 'column'].values
for col in df.columns:
if col not in valid_columns:
problems.append(['warning',
'Unrecognized column {!s}'.format(col),
table, []])
return problems
def check_frequencies(pfeed, *, as_df=False, include_warnings=False):
"""
Check that ``pfeed.frequency`` follows the ProtoFeed spec.
Return a list of problems of the form described in
:func:`gt.check_table`;
the list will be empty if no problems are found.
"""
table = 'frequencies'
problems = []
# Preliminary checks
if pfeed.frequencies is None:
problems.append(['error', 'Missing table', table, []])
else:
f = pfeed.frequencies.copy()
problems = check_for_required_columns(problems, table, f)
if problems:
return gt.format_problems(problems, as_df=as_df)
if include_warnings:
problems = check_for_invalid_columns(problems, table, f)
# Check route_short_name and route_long_name
for column in ['route_short_name', 'route_long_name']:
problems = gt.check_column(problems, table, f, column, gt.valid_str,
column_required=False)
cond = ~(f['route_short_name'].notnull() | f['route_long_name'].notnull())
problems = gt.check_table(problems, table, f, cond,
'route_short_name and route_long_name both empty')
# Check route_type
v = lambda x: x in range(8)
problems = gt.check_column(problems, table, f, 'route_type', v)
# Check service window ID
problems = gt.check_column_linked_id(problems, table, f,
'service_window_id', pfeed.service_windows)
# Check direction
v = lambda x: x in range(3)
problems = gt.check_column(problems, table, f, 'direction', v)
# Check frequency
v = lambda x: isinstance(x, int)
problems = gt.check_column(problems, table, f, 'frequency', v)
# Check speed
problems = gt.check_column(problems, table, f, 'speed', valid_speed,
column_required=False)
# Check shape ID
problems = gt.check_column_linked_id(problems, table, f, 'shape_id',
pfeed.shapes)
return gt.format_problems(problems, as_df=as_df)
def check_meta(pfeed, *, as_df=False, include_warnings=False):
"""
Analog of :func:`check_frequencies` for ``pfeed.meta``
"""
table = 'meta'
problems = []
# Preliminary checks
if pfeed.meta is None:
problems.append(['error', 'Missing table', table, []])
else:
f = pfeed.meta.copy()
problems = check_for_required_columns(problems, table, f)
if problems:
return gt.format_problems(problems, as_df=as_df)
if include_warnings:
problems = check_for_invalid_columns(problems, table, f)
if f.shape[0] > 1:
problems.append(['error', 'Meta must have only one row',
table, list(range(1, f.shape[0]))])
# Check agency_name
problems = gt.check_column(problems, table, f, 'agency_name', gt.valid_str)
# Check agency_url
problems = gt.check_column(problems, table, f, 'agency_url', gt.valid_url)
# Check agency_timezone
problems = gt.check_column(problems, table, f, 'agency_timezone',
gt.valid_timezone)
# Check start_date and end_date
for col in ['start_date', 'end_date']:
problems = gt.check_column(problems, table, f, col, gt.valid_date)
# Check default_route_speed
problems = gt.check_column(problems, table, f, 'default_route_speed',
valid_speed)
return gt.format_problems(problems, as_df=as_df)
def check_service_windows(pfeed, *, as_df=False, include_warnings=False):
"""
Analog of :func:`check_frequencies` for ``pfeed.service_windows``
"""
table = 'service_windows'
problems = []
# Preliminary checks
if pfeed.service_windows is None:
problems.append(['error', 'Missing table', table, []])
else:
f = pfeed.service_windows.copy()
problems = check_for_required_columns(problems, table, f)
if problems:
return gt.format_problems(problems, as_df=as_df)
if include_warnings:
problems = check_for_invalid_columns(problems, table, f)
# Check service window ID
problems = gt.check_column_id(problems, table, f,
'service_window_id')
# Check start_time and end_time
for column in ['start_time', 'end_time']:
problems = gt.check_column(problems, table, f, column, gt.valid_time)
# Check weekday columns
v = lambda x: x in range(2)
for col in ['monday', 'tuesday', 'wednesday', 'thursday', 'friday',
'saturday', 'sunday']:
problems = gt.check_column(problems, table, f, col, v)
return gt.format_problems(problems, as_df=as_df)
def check_shapes(pfeed, *, as_df=False, include_warnings=False):
"""
Analog of :func:`check_frequencies` for ``pfeed.shapes``
"""
table = 'shapes'
problems = []
# Preliminary checks
if pfeed.shapes is None:
return problems
f = pfeed.shapes.copy()
problems = check_for_required_columns(problems, table, f)
if problems:
return gt.format_problems(problems, as_df=as_df)
if include_warnings:
problems = check_for_invalid_columns(problems, table, f)
# Check shape_id
problems = gt.check_column(problems, table, f, 'shape_id', gt.valid_str)
# Check geometry
v = lambda x: isinstance(x, sg.LineString) and not x.is_empty
problems = gt.check_column(problems, table, f, 'geometry', v)
return gt.format_problems(problems, as_df=as_df)
def check_stops(pfeed, *, as_df=False, include_warnings=False):
"""
Analog of :func:`check_frequencies` for ``pfeed.stops``
"""
# Use gtfstk's stop validator
if pfeed.stops is not None:
stop_times = pd.DataFrame(columns=['stop_id'])
feed = gt.Feed(stops=pfeed.stops, stop_times=stop_times,
dist_units='km')
return gt.check_stops(feed, as_df=as_df, include_warnings=False)
|
mrcagney/make_gtfs | make_gtfs/main.py | get_duration | python | def get_duration(timestr1, timestr2, units='s'):
valid_units = ['s', 'min', 'h']
assert units in valid_units,\
"Units must be one of {!s}".format(valid_units)
duration = (
gt.timestr_to_seconds(timestr2) - gt.timestr_to_seconds(timestr1)
)
if units == 's':
return duration
elif units == 'min':
return duration/60
else:
return duration/3600 | Return the duration of the time period between the first and second
time string in the given units.
Allowable units are 's' (seconds), 'min' (minutes), 'h' (hours).
Assume ``timestr1 < timestr2``. | train | https://github.com/mrcagney/make_gtfs/blob/37b6f88e03bac708c2e85d6f4b6d48a0c92e4a59/make_gtfs/main.py#L10-L30 | null | import pandas as pd
import numpy as np
import shapely.ops as so
import shapely.geometry as sg
import gtfstk as gt
from . import constants as cs
def build_stop_ids(shape_id):
    """
    Create a pair of stop IDs based on the given shape ID.
    """
    # IDs look like 'stp<SEP><shape_id><SEP>0' and '...<SEP>1'
    make_id = lambda i: cs.SEP.join(['stp', shape_id, str(i)])
    return [make_id(0), make_id(1)]
def build_stop_names(shape_id):
    """
    Create a pair of stop names based on the given shape ID.
    """
    # NOTE: the template deliberately ends with a space to match the
    # feed's existing naming.
    template = 'Stop {!s} on shape {!s} '
    return [template.format(i, shape_id) for i in (0, 1)]
def build_agency(pfeed):
    """
    Given a ProtoFeed, return a DataFrame representing ``agency.txt``.
    """
    # Pull the scalar agency fields out of the single-row meta table.
    meta = pfeed.meta
    fields = ['agency_name', 'agency_url', 'agency_timezone']
    record = {field: meta[field].iat[0] for field in fields}
    return pd.DataFrame(record, index=[0])
def build_calendar_etc(pfeed):
    """
    Given a ProtoFeed, return a DataFrame representing ``calendar.txt``
    and a dictionary of the form <service window ID> -> <service ID>,
    respectively.
    """
    weekdays = ['monday', 'tuesday', 'wednesday', 'thursday', 'friday',
      'saturday', 'sunday']

    def make_service_id(bits):
        # A service ID encodes its active days, e.g. 'srv1111100'.
        return 'srv' + ''.join(str(b) for b in bits)

    # Map each service window to the service ID of its active-day
    # pattern, collecting the distinct patterns along the way.
    service_by_window = {}
    patterns = set()
    for _, window in pfeed.service_windows.copy().iterrows():
        bits = tuple(window[weekdays].tolist())
        service_by_window[window['service_window_id']] = \
          make_service_id(bits)
        patterns.add(bits)

    # One calendar row per distinct day pattern, all sharing the feed's
    # start and end dates.
    start_date = pfeed.meta['start_date'].iat[0]
    end_date = pfeed.meta['end_date'].iat[0]
    rows = [[make_service_id(bits)] + list(bits) + [start_date, end_date]
      for bits in patterns]
    calendar = pd.DataFrame(rows, columns=(
      ['service_id'] + weekdays + ['start_date', 'end_date']))

    return calendar, service_by_window
def build_routes(pfeed):
"""
Given a ProtoFeed, return a DataFrame representing ``routes.txt``.
"""
f = pfeed.frequencies[['route_short_name', 'route_long_name',
'route_type', 'shape_id']].drop_duplicates().copy()
# Create route IDs
f['route_id'] = 'r' + f['route_short_name'].map(str)
del f['shape_id']
return f
def build_shapes(pfeed):
"""
Given a ProtoFeed, return DataFrame representing ``shapes.txt``.
Only use shape IDs that occur in both ``pfeed.shapes`` and
``pfeed.frequencies``.
Create reversed shapes where routes traverse shapes in both
directions.
"""
rows = []
for shape, geom in pfeed.shapes[['shape_id',
'geometry']].itertuples(index=False):
if shape not in pfeed.shapes_extra:
continue
if pfeed.shapes_extra[shape] == 2:
# Add shape and its reverse
shid = shape + '-1'
new_rows = [[shid, i, lon, lat]
for i, (lon, lat) in enumerate(geom.coords)]
rows.extend(new_rows)
shid = shape + '-0'
new_rows = [[shid, i, lon, lat]
for i, (lon, lat) in enumerate(reversed(geom.coords))]
rows.extend(new_rows)
else:
# Add shape
shid = '{}{}{}'.format(shape, cs.SEP, pfeed.shapes_extra[shape])
new_rows = [[shid, i, lon, lat]
for i, (lon, lat) in enumerate(geom.coords)]
rows.extend(new_rows)
return pd.DataFrame(rows, columns=['shape_id', 'shape_pt_sequence',
'shape_pt_lon', 'shape_pt_lat'])
def build_stops(pfeed, shapes=None):
"""
Given a ProtoFeed, return a DataFrame representing ``stops.txt``.
If ``pfeed.stops`` is not ``None``, then return that.
Otherwise, require built shapes output by :func:`build_shapes`,
create one stop at the beginning (the first point) of each shape
and one at the end (the last point) of each shape,
and drop stops with duplicate coordinates.
Note that this will yield one stop for shapes that are loops.
"""
if pfeed.stops is not None:
stops = pfeed.stops.copy()
else:
if shapes is None:
raise ValueError('Must input shapes built by build_shapes()')
geo_shapes = gt.geometrize_shapes(shapes)
rows = []
for shape, geom in geo_shapes[['shape_id',
'geometry']].itertuples(index=False):
stop_ids = build_stop_ids(shape)
stop_names = build_stop_names(shape)
for i in range(2):
stop_id = stop_ids[i]
stop_name = stop_names[i]
stop_lon, stop_lat = geom.interpolate(i,
normalized=True).coords[0]
rows.append([stop_id, stop_name, stop_lon, stop_lat])
stops = (
pd.DataFrame(rows, columns=['stop_id', 'stop_name',
'stop_lon', 'stop_lat'])
.drop_duplicates(subset=['stop_lon', 'stop_lat'])
)
return stops
def build_trips(pfeed, routes, service_by_window):
"""
Given a ProtoFeed and its corresponding routes (DataFrame),
service-by-window (dictionary), return a DataFrame representing
``trips.txt``.
Trip IDs encode route, direction, and service window information
to make it easy to compute stop times later.
"""
# Put together the route and service data
routes = pd.merge(routes[['route_id', 'route_short_name']],
pfeed.frequencies)
routes = pd.merge(routes, pfeed.service_windows)
# For each row in routes, add trips at the specified frequency in
# the specified direction
rows = []
for index, row in routes.iterrows():
shape = row['shape_id']
route = row['route_id']
window = row['service_window_id']
start, end = row[['start_time', 'end_time']].values
duration = get_duration(start, end, 'h')
frequency = row['frequency']
if not frequency:
# No trips during this service window
continue
# Rounding down occurs here if the duration isn't integral
# (bad input)
num_trips_per_direction = int(frequency*duration)
service = service_by_window[window]
direction = row['direction']
if direction == 2:
directions = [0, 1]
else:
directions = [direction]
for direction in directions:
# Warning: this shape-ID-making logic needs to match that
# in ``build_shapes``
shid = '{}{}{}'.format(shape, cs.SEP, direction)
rows.extend([[
route,
cs.SEP.join(['t', route, window, start,
str(direction), str(i)]),
direction,
shid,
service
] for i in range(num_trips_per_direction)])
return pd.DataFrame(rows, columns=['route_id', 'trip_id', 'direction_id',
'shape_id', 'service_id'])
def buffer_side(linestring, side, buffer):
"""
Given a Shapely LineString, a side of the LineString
(string; 'left' = left hand side of LineString,
'right' = right hand side of LineString, or
'both' = both sides), and a buffer size in the distance units of
the LineString, buffer the LineString on the given side by
the buffer size and return the resulting Shapely polygon.
"""
b = linestring.buffer(buffer, cap_style=2)
if side in ['left', 'right'] and buffer > 0:
# Make a tiny buffer to split the normal-size buffer
# in half across the linestring
eps = min(buffer/2, 0.001)
b0 = linestring.buffer(eps, cap_style=3)
diff = b.difference(b0)
polys = so.polygonize(diff)
# Buffer sides slightly to include original linestring
if side == 'left':
b = list(polys)[0].buffer(1.1*eps)
else:
b = list(polys)[-1].buffer(1.1*eps)
return b
def get_nearby_stops(geo_stops, linestring, side, buffer=cs.BUFFER):
"""
Given a GeoDataFrame of stops, a Shapely LineString in the
same coordinate system, a side of the LineString
(string; 'left' = left hand side of LineString,
'right' = right hand side of LineString, or
'both' = both sides), and a buffer in the distance units of that
coordinate system, do the following.
Return a GeoDataFrame of all the stops that lie within
``buffer`` distance units to the ``side`` of the LineString.
"""
b = buffer_side(linestring, side, buffer)
# Collect stops
return geo_stops.loc[geo_stops.intersects(b)].copy()
def build_stop_times(pfeed, routes, shapes, stops, trips, buffer=cs.BUFFER):
"""
Given a ProtoFeed and its corresponding routes (DataFrame),
shapes (DataFrame), stops (DataFrame), trips (DataFrame),
return DataFrame representing ``stop_times.txt``.
Includes the optional ``shape_dist_traveled`` column.
Don't make stop times for trips with no nearby stops.
"""
# Get the table of trips and add frequency and service window details
routes = (
routes
.filter(['route_id', 'route_short_name'])
.merge(pfeed.frequencies.drop(['shape_id'], axis=1))
)
trips = (
trips
.assign(service_window_id=lambda x: x.trip_id.map(
lambda y: y.split(cs.SEP)[2]))
.merge(routes)
)
# Get the geometries of ``shapes`` and not ``pfeed.shapes``
geometry_by_shape = dict(
gt.geometrize_shapes(shapes, use_utm=True)
.filter(['shape_id', 'geometry'])
.values
)
# Save on distance computations by memoizing
dist_by_stop_by_shape = {shape: {} for shape in geometry_by_shape}
def compute_stops_dists_times(geo_stops, linestring, shape,
start_time, end_time):
"""
Given a GeoDataFrame of stops on one side of a given Shapely
LineString with given shape ID, compute distances and departure
times of a trip traversing the LineString from start to end
at the given start and end times (in seconds past midnight)
and stopping at the stops encountered along the way.
Do not assume that the stops are ordered by trip encounter.
Return three lists of the same length: the stop IDs in order
that the trip encounters them, the shape distances traveled
along distances at the stops, and the times the stops are
encountered, respectively.
"""
g = geo_stops.copy()
dists_and_stops = []
for i, stop in enumerate(g['stop_id'].values):
if stop in dist_by_stop_by_shape[shape]:
d = dist_by_stop_by_shape[shape][stop]
else:
d = gt.get_segment_length(linestring,
g.geometry.iat[i])/1000 # km
dist_by_stop_by_shape[shape][stop] = d
dists_and_stops.append((d, stop))
dists, stops = zip(*sorted(dists_and_stops))
D = linestring.length/1000
dists_are_reasonable = all([d < D + 100 for d in dists])
if not dists_are_reasonable:
# Assume equal distances between stops :-(
n = len(stops)
delta = D/(n - 1)
dists = [i*delta for i in range(n)]
# Compute times using distances, start and end stop times,
# and linear interpolation
t0, t1 = start_time, end_time
d0, d1 = dists[0], dists[-1]
# Interpolate
times = np.interp(dists, [d0, d1], [t0, t1])
return stops, dists, times
# Iterate through trips and set stop times based on stop ID
# and service window frequency.
# Remember that every trip has a valid shape ID.
# Gather stops geographically from ``stops``.
rows = []
geo_stops = gt.geometrize_stops(stops, use_utm=True)
# Look on the side of the traffic side of street for this timezone
side = cs.traffic_by_timezone[pfeed.meta.agency_timezone.iat[0]]
for index, row in trips.iterrows():
shape = row['shape_id']
geom = geometry_by_shape[shape]
stops = get_nearby_stops(geo_stops, geom, side, buffer=buffer)
# Don't make stop times for trips without nearby stops
if stops.empty:
continue
length = geom.length/1000 # km
speed = row['speed'] # km/h
duration = int((length/speed)*3600) # seconds
frequency = row['frequency']
if not frequency:
# No stop times for this trip/frequency combo
continue
headway = 3600/frequency # seconds
trip = row['trip_id']
__, route, window, base_timestr, direction, i = (
trip.split(cs.SEP))
direction = int(direction)
base_time = gt.timestr_to_seconds(base_timestr)
start_time = base_time + headway*int(i)
end_time = start_time + duration
stops, dists, times = compute_stops_dists_times(stops, geom, shape,
start_time, end_time)
new_rows = [[trip, stop, j, time, time, dist]
for j, (stop, time, dist) in enumerate(zip(stops, times, dists))]
rows.extend(new_rows)
g = pd.DataFrame(rows, columns=['trip_id', 'stop_id', 'stop_sequence',
'arrival_time', 'departure_time', 'shape_dist_traveled'])
# Convert seconds back to time strings
g[['arrival_time', 'departure_time']] =\
g[['arrival_time', 'departure_time']].applymap(
lambda x: gt.timestr_to_seconds(x, inverse=True))
return g
def build_feed(pfeed, buffer=cs.BUFFER):
# Create Feed tables
agency = build_agency(pfeed)
calendar, service_by_window = build_calendar_etc(pfeed)
routes = build_routes(pfeed)
shapes = build_shapes(pfeed)
stops = build_stops(pfeed, shapes)
trips = build_trips(pfeed, routes, service_by_window)
stop_times = build_stop_times(pfeed, routes, shapes, stops, trips,
buffer=buffer)
# Be tidy and remove unused stops
stops = stops[stops.stop_id.isin(stop_times.stop_id)].copy()
# Create Feed
return gt.Feed(agency=agency, calendar=calendar, routes=routes,
shapes=shapes, stops=stops, stop_times=stop_times, trips=trips,
dist_units='km') |
mrcagney/make_gtfs | make_gtfs/main.py | build_stop_ids | python | def build_stop_ids(shape_id):
return [cs.SEP.join(['stp', shape_id, str(i)]) for i in range(2)] | Create a pair of stop IDs based on the given shape ID. | train | https://github.com/mrcagney/make_gtfs/blob/37b6f88e03bac708c2e85d6f4b6d48a0c92e4a59/make_gtfs/main.py#L32-L36 | null | import pandas as pd
import numpy as np
import shapely.ops as so
import shapely.geometry as sg
import gtfstk as gt
from . import constants as cs
def get_duration(timestr1, timestr2, units='s'):
"""
Return the duration of the time period between the first and second
time string in the given units.
Allowable units are 's' (seconds), 'min' (minutes), 'h' (hours).
Assume ``timestr1 < timestr2``.
"""
valid_units = ['s', 'min', 'h']
assert units in valid_units,\
"Units must be one of {!s}".format(valid_units)
duration = (
gt.timestr_to_seconds(timestr2) - gt.timestr_to_seconds(timestr1)
)
if units == 's':
return duration
elif units == 'min':
return duration/60
else:
return duration/3600
def build_stop_names(shape_id):
"""
Create a pair of stop names based on the given shape ID.
"""
return ['Stop {!s} on shape {!s} '.format(i, shape_id)
for i in range(2)]
def build_agency(pfeed):
"""
Given a ProtoFeed, return a DataFrame representing ``agency.txt``
"""
return pd.DataFrame({
'agency_name': pfeed.meta['agency_name'].iat[0],
'agency_url': pfeed.meta['agency_url'].iat[0],
'agency_timezone': pfeed.meta['agency_timezone'].iat[0],
}, index=[0])
def build_calendar_etc(pfeed):
"""
Given a ProtoFeed, return a DataFrame representing ``calendar.txt``
and a dictionary of the form <service window ID> -> <service ID>,
respectively.
"""
windows = pfeed.service_windows.copy()
# Create a service ID for each distinct days_active field and map the
# service windows to those service IDs
def get_sid(bitlist):
return 'srv' + ''.join([str(b) for b in bitlist])
weekdays = ['monday', 'tuesday', 'wednesday', 'thursday', 'friday',
'saturday', 'sunday']
bitlists = set()
# Create a dictionary <service window ID> -> <service ID>
d = dict()
for index, window in windows.iterrows():
bitlist = window[weekdays].tolist()
d[window['service_window_id']] = get_sid(bitlist)
bitlists.add(tuple(bitlist))
service_by_window = d
# Create calendar
start_date = pfeed.meta['start_date'].iat[0]
end_date = pfeed.meta['end_date'].iat[0]
F = []
for bitlist in bitlists:
F.append([get_sid(bitlist)] + list(bitlist) +
[start_date, end_date])
calendar = pd.DataFrame(F, columns=(
['service_id'] + weekdays + ['start_date', 'end_date']))
return calendar, service_by_window
def build_routes(pfeed):
"""
Given a ProtoFeed, return a DataFrame representing ``routes.txt``.
"""
f = pfeed.frequencies[['route_short_name', 'route_long_name',
'route_type', 'shape_id']].drop_duplicates().copy()
# Create route IDs
f['route_id'] = 'r' + f['route_short_name'].map(str)
del f['shape_id']
return f
def build_shapes(pfeed):
"""
Given a ProtoFeed, return DataFrame representing ``shapes.txt``.
Only use shape IDs that occur in both ``pfeed.shapes`` and
``pfeed.frequencies``.
Create reversed shapes where routes traverse shapes in both
directions.
"""
rows = []
for shape, geom in pfeed.shapes[['shape_id',
'geometry']].itertuples(index=False):
if shape not in pfeed.shapes_extra:
continue
if pfeed.shapes_extra[shape] == 2:
# Add shape and its reverse
shid = shape + '-1'
new_rows = [[shid, i, lon, lat]
for i, (lon, lat) in enumerate(geom.coords)]
rows.extend(new_rows)
shid = shape + '-0'
new_rows = [[shid, i, lon, lat]
for i, (lon, lat) in enumerate(reversed(geom.coords))]
rows.extend(new_rows)
else:
# Add shape
shid = '{}{}{}'.format(shape, cs.SEP, pfeed.shapes_extra[shape])
new_rows = [[shid, i, lon, lat]
for i, (lon, lat) in enumerate(geom.coords)]
rows.extend(new_rows)
return pd.DataFrame(rows, columns=['shape_id', 'shape_pt_sequence',
'shape_pt_lon', 'shape_pt_lat'])
def build_stops(pfeed, shapes=None):
"""
Given a ProtoFeed, return a DataFrame representing ``stops.txt``.
If ``pfeed.stops`` is not ``None``, then return that.
Otherwise, require built shapes output by :func:`build_shapes`,
create one stop at the beginning (the first point) of each shape
and one at the end (the last point) of each shape,
and drop stops with duplicate coordinates.
Note that this will yield one stop for shapes that are loops.
"""
if pfeed.stops is not None:
stops = pfeed.stops.copy()
else:
if shapes is None:
raise ValueError('Must input shapes built by build_shapes()')
geo_shapes = gt.geometrize_shapes(shapes)
rows = []
for shape, geom in geo_shapes[['shape_id',
'geometry']].itertuples(index=False):
stop_ids = build_stop_ids(shape)
stop_names = build_stop_names(shape)
for i in range(2):
stop_id = stop_ids[i]
stop_name = stop_names[i]
stop_lon, stop_lat = geom.interpolate(i,
normalized=True).coords[0]
rows.append([stop_id, stop_name, stop_lon, stop_lat])
stops = (
pd.DataFrame(rows, columns=['stop_id', 'stop_name',
'stop_lon', 'stop_lat'])
.drop_duplicates(subset=['stop_lon', 'stop_lat'])
)
return stops
def build_trips(pfeed, routes, service_by_window):
"""
Given a ProtoFeed and its corresponding routes (DataFrame),
service-by-window (dictionary), return a DataFrame representing
``trips.txt``.
Trip IDs encode route, direction, and service window information
to make it easy to compute stop times later.
"""
# Put together the route and service data
routes = pd.merge(routes[['route_id', 'route_short_name']],
pfeed.frequencies)
routes = pd.merge(routes, pfeed.service_windows)
# For each row in routes, add trips at the specified frequency in
# the specified direction
rows = []
for index, row in routes.iterrows():
shape = row['shape_id']
route = row['route_id']
window = row['service_window_id']
start, end = row[['start_time', 'end_time']].values
duration = get_duration(start, end, 'h')
frequency = row['frequency']
if not frequency:
# No trips during this service window
continue
# Rounding down occurs here if the duration isn't integral
# (bad input)
num_trips_per_direction = int(frequency*duration)
service = service_by_window[window]
direction = row['direction']
if direction == 2:
directions = [0, 1]
else:
directions = [direction]
for direction in directions:
# Warning: this shape-ID-making logic needs to match that
# in ``build_shapes``
shid = '{}{}{}'.format(shape, cs.SEP, direction)
rows.extend([[
route,
cs.SEP.join(['t', route, window, start,
str(direction), str(i)]),
direction,
shid,
service
] for i in range(num_trips_per_direction)])
return pd.DataFrame(rows, columns=['route_id', 'trip_id', 'direction_id',
'shape_id', 'service_id'])
def buffer_side(linestring, side, buffer):
"""
Given a Shapely LineString, a side of the LineString
(string; 'left' = left hand side of LineString,
'right' = right hand side of LineString, or
'both' = both sides), and a buffer size in the distance units of
the LineString, buffer the LineString on the given side by
the buffer size and return the resulting Shapely polygon.
"""
b = linestring.buffer(buffer, cap_style=2)
if side in ['left', 'right'] and buffer > 0:
# Make a tiny buffer to split the normal-size buffer
# in half across the linestring
eps = min(buffer/2, 0.001)
b0 = linestring.buffer(eps, cap_style=3)
diff = b.difference(b0)
polys = so.polygonize(diff)
# Buffer sides slightly to include original linestring
if side == 'left':
b = list(polys)[0].buffer(1.1*eps)
else:
b = list(polys)[-1].buffer(1.1*eps)
return b
def get_nearby_stops(geo_stops, linestring, side, buffer=cs.BUFFER):
"""
Given a GeoDataFrame of stops, a Shapely LineString in the
same coordinate system, a side of the LineString
(string; 'left' = left hand side of LineString,
'right' = right hand side of LineString, or
'both' = both sides), and a buffer in the distance units of that
coordinate system, do the following.
Return a GeoDataFrame of all the stops that lie within
``buffer`` distance units to the ``side`` of the LineString.
"""
b = buffer_side(linestring, side, buffer)
# Collect stops
return geo_stops.loc[geo_stops.intersects(b)].copy()
def build_stop_times(pfeed, routes, shapes, stops, trips, buffer=cs.BUFFER):
"""
Given a ProtoFeed and its corresponding routes (DataFrame),
shapes (DataFrame), stops (DataFrame), trips (DataFrame),
return DataFrame representing ``stop_times.txt``.
Includes the optional ``shape_dist_traveled`` column.
Don't make stop times for trips with no nearby stops.
"""
# Get the table of trips and add frequency and service window details
routes = (
routes
.filter(['route_id', 'route_short_name'])
.merge(pfeed.frequencies.drop(['shape_id'], axis=1))
)
trips = (
trips
.assign(service_window_id=lambda x: x.trip_id.map(
lambda y: y.split(cs.SEP)[2]))
.merge(routes)
)
# Get the geometries of ``shapes`` and not ``pfeed.shapes``
geometry_by_shape = dict(
gt.geometrize_shapes(shapes, use_utm=True)
.filter(['shape_id', 'geometry'])
.values
)
# Save on distance computations by memoizing
dist_by_stop_by_shape = {shape: {} for shape in geometry_by_shape}
def compute_stops_dists_times(geo_stops, linestring, shape,
start_time, end_time):
"""
Given a GeoDataFrame of stops on one side of a given Shapely
LineString with given shape ID, compute distances and departure
times of a trip traversing the LineString from start to end
at the given start and end times (in seconds past midnight)
and stopping at the stops encountered along the way.
Do not assume that the stops are ordered by trip encounter.
Return three lists of the same length: the stop IDs in order
that the trip encounters them, the shape distances traveled
along distances at the stops, and the times the stops are
encountered, respectively.
"""
g = geo_stops.copy()
dists_and_stops = []
for i, stop in enumerate(g['stop_id'].values):
if stop in dist_by_stop_by_shape[shape]:
d = dist_by_stop_by_shape[shape][stop]
else:
d = gt.get_segment_length(linestring,
g.geometry.iat[i])/1000 # km
dist_by_stop_by_shape[shape][stop] = d
dists_and_stops.append((d, stop))
dists, stops = zip(*sorted(dists_and_stops))
D = linestring.length/1000
dists_are_reasonable = all([d < D + 100 for d in dists])
if not dists_are_reasonable:
# Assume equal distances between stops :-(
n = len(stops)
delta = D/(n - 1)
dists = [i*delta for i in range(n)]
# Compute times using distances, start and end stop times,
# and linear interpolation
t0, t1 = start_time, end_time
d0, d1 = dists[0], dists[-1]
# Interpolate
times = np.interp(dists, [d0, d1], [t0, t1])
return stops, dists, times
# Iterate through trips and set stop times based on stop ID
# and service window frequency.
# Remember that every trip has a valid shape ID.
# Gather stops geographically from ``stops``.
rows = []
geo_stops = gt.geometrize_stops(stops, use_utm=True)
# Look on the side of the traffic side of street for this timezone
side = cs.traffic_by_timezone[pfeed.meta.agency_timezone.iat[0]]
for index, row in trips.iterrows():
shape = row['shape_id']
geom = geometry_by_shape[shape]
stops = get_nearby_stops(geo_stops, geom, side, buffer=buffer)
# Don't make stop times for trips without nearby stops
if stops.empty:
continue
length = geom.length/1000 # km
speed = row['speed'] # km/h
duration = int((length/speed)*3600) # seconds
frequency = row['frequency']
if not frequency:
# No stop times for this trip/frequency combo
continue
headway = 3600/frequency # seconds
trip = row['trip_id']
__, route, window, base_timestr, direction, i = (
trip.split(cs.SEP))
direction = int(direction)
base_time = gt.timestr_to_seconds(base_timestr)
start_time = base_time + headway*int(i)
end_time = start_time + duration
stops, dists, times = compute_stops_dists_times(stops, geom, shape,
start_time, end_time)
new_rows = [[trip, stop, j, time, time, dist]
for j, (stop, time, dist) in enumerate(zip(stops, times, dists))]
rows.extend(new_rows)
g = pd.DataFrame(rows, columns=['trip_id', 'stop_id', 'stop_sequence',
'arrival_time', 'departure_time', 'shape_dist_traveled'])
# Convert seconds back to time strings
g[['arrival_time', 'departure_time']] =\
g[['arrival_time', 'departure_time']].applymap(
lambda x: gt.timestr_to_seconds(x, inverse=True))
return g
def build_feed(pfeed, buffer=cs.BUFFER):
# Create Feed tables
agency = build_agency(pfeed)
calendar, service_by_window = build_calendar_etc(pfeed)
routes = build_routes(pfeed)
shapes = build_shapes(pfeed)
stops = build_stops(pfeed, shapes)
trips = build_trips(pfeed, routes, service_by_window)
stop_times = build_stop_times(pfeed, routes, shapes, stops, trips,
buffer=buffer)
# Be tidy and remove unused stops
stops = stops[stops.stop_id.isin(stop_times.stop_id)].copy()
# Create Feed
return gt.Feed(agency=agency, calendar=calendar, routes=routes,
shapes=shapes, stops=stops, stop_times=stop_times, trips=trips,
dist_units='km') |
mrcagney/make_gtfs | make_gtfs/main.py | build_agency | python | def build_agency(pfeed):
return pd.DataFrame({
'agency_name': pfeed.meta['agency_name'].iat[0],
'agency_url': pfeed.meta['agency_url'].iat[0],
'agency_timezone': pfeed.meta['agency_timezone'].iat[0],
}, index=[0]) | Given a ProtoFeed, return a DataFrame representing ``agency.txt`` | train | https://github.com/mrcagney/make_gtfs/blob/37b6f88e03bac708c2e85d6f4b6d48a0c92e4a59/make_gtfs/main.py#L45-L53 | null | import pandas as pd
import numpy as np
import shapely.ops as so
import shapely.geometry as sg
import gtfstk as gt
from . import constants as cs
def get_duration(timestr1, timestr2, units='s'):
"""
Return the duration of the time period between the first and second
time string in the given units.
Allowable units are 's' (seconds), 'min' (minutes), 'h' (hours).
Assume ``timestr1 < timestr2``.
"""
valid_units = ['s', 'min', 'h']
assert units in valid_units,\
"Units must be one of {!s}".format(valid_units)
duration = (
gt.timestr_to_seconds(timestr2) - gt.timestr_to_seconds(timestr1)
)
if units == 's':
return duration
elif units == 'min':
return duration/60
else:
return duration/3600
def build_stop_ids(shape_id):
"""
Create a pair of stop IDs based on the given shape ID.
"""
return [cs.SEP.join(['stp', shape_id, str(i)]) for i in range(2)]
def build_stop_names(shape_id):
"""
Create a pair of stop names based on the given shape ID.
"""
return ['Stop {!s} on shape {!s} '.format(i, shape_id)
for i in range(2)]
def build_calendar_etc(pfeed):
"""
Given a ProtoFeed, return a DataFrame representing ``calendar.txt``
and a dictionary of the form <service window ID> -> <service ID>,
respectively.
"""
windows = pfeed.service_windows.copy()
# Create a service ID for each distinct days_active field and map the
# service windows to those service IDs
def get_sid(bitlist):
return 'srv' + ''.join([str(b) for b in bitlist])
weekdays = ['monday', 'tuesday', 'wednesday', 'thursday', 'friday',
'saturday', 'sunday']
bitlists = set()
# Create a dictionary <service window ID> -> <service ID>
d = dict()
for index, window in windows.iterrows():
bitlist = window[weekdays].tolist()
d[window['service_window_id']] = get_sid(bitlist)
bitlists.add(tuple(bitlist))
service_by_window = d
# Create calendar
start_date = pfeed.meta['start_date'].iat[0]
end_date = pfeed.meta['end_date'].iat[0]
F = []
for bitlist in bitlists:
F.append([get_sid(bitlist)] + list(bitlist) +
[start_date, end_date])
calendar = pd.DataFrame(F, columns=(
['service_id'] + weekdays + ['start_date', 'end_date']))
return calendar, service_by_window
def build_routes(pfeed):
"""
Given a ProtoFeed, return a DataFrame representing ``routes.txt``.
"""
f = pfeed.frequencies[['route_short_name', 'route_long_name',
'route_type', 'shape_id']].drop_duplicates().copy()
# Create route IDs
f['route_id'] = 'r' + f['route_short_name'].map(str)
del f['shape_id']
return f
def build_shapes(pfeed):
"""
Given a ProtoFeed, return DataFrame representing ``shapes.txt``.
Only use shape IDs that occur in both ``pfeed.shapes`` and
``pfeed.frequencies``.
Create reversed shapes where routes traverse shapes in both
directions.
"""
rows = []
for shape, geom in pfeed.shapes[['shape_id',
'geometry']].itertuples(index=False):
if shape not in pfeed.shapes_extra:
continue
if pfeed.shapes_extra[shape] == 2:
# Add shape and its reverse
shid = shape + '-1'
new_rows = [[shid, i, lon, lat]
for i, (lon, lat) in enumerate(geom.coords)]
rows.extend(new_rows)
shid = shape + '-0'
new_rows = [[shid, i, lon, lat]
for i, (lon, lat) in enumerate(reversed(geom.coords))]
rows.extend(new_rows)
else:
# Add shape
shid = '{}{}{}'.format(shape, cs.SEP, pfeed.shapes_extra[shape])
new_rows = [[shid, i, lon, lat]
for i, (lon, lat) in enumerate(geom.coords)]
rows.extend(new_rows)
return pd.DataFrame(rows, columns=['shape_id', 'shape_pt_sequence',
'shape_pt_lon', 'shape_pt_lat'])
def build_stops(pfeed, shapes=None):
"""
Given a ProtoFeed, return a DataFrame representing ``stops.txt``.
If ``pfeed.stops`` is not ``None``, then return that.
Otherwise, require built shapes output by :func:`build_shapes`,
create one stop at the beginning (the first point) of each shape
and one at the end (the last point) of each shape,
and drop stops with duplicate coordinates.
Note that this will yield one stop for shapes that are loops.
"""
if pfeed.stops is not None:
stops = pfeed.stops.copy()
else:
if shapes is None:
raise ValueError('Must input shapes built by build_shapes()')
geo_shapes = gt.geometrize_shapes(shapes)
rows = []
for shape, geom in geo_shapes[['shape_id',
'geometry']].itertuples(index=False):
stop_ids = build_stop_ids(shape)
stop_names = build_stop_names(shape)
for i in range(2):
stop_id = stop_ids[i]
stop_name = stop_names[i]
stop_lon, stop_lat = geom.interpolate(i,
normalized=True).coords[0]
rows.append([stop_id, stop_name, stop_lon, stop_lat])
stops = (
pd.DataFrame(rows, columns=['stop_id', 'stop_name',
'stop_lon', 'stop_lat'])
.drop_duplicates(subset=['stop_lon', 'stop_lat'])
)
return stops
def build_trips(pfeed, routes, service_by_window):
"""
Given a ProtoFeed and its corresponding routes (DataFrame),
service-by-window (dictionary), return a DataFrame representing
``trips.txt``.
Trip IDs encode route, direction, and service window information
to make it easy to compute stop times later.
"""
# Put together the route and service data
routes = pd.merge(routes[['route_id', 'route_short_name']],
pfeed.frequencies)
routes = pd.merge(routes, pfeed.service_windows)
# For each row in routes, add trips at the specified frequency in
# the specified direction
rows = []
for index, row in routes.iterrows():
shape = row['shape_id']
route = row['route_id']
window = row['service_window_id']
start, end = row[['start_time', 'end_time']].values
duration = get_duration(start, end, 'h')
frequency = row['frequency']
if not frequency:
# No trips during this service window
continue
# Rounding down occurs here if the duration isn't integral
# (bad input)
num_trips_per_direction = int(frequency*duration)
service = service_by_window[window]
direction = row['direction']
if direction == 2:
directions = [0, 1]
else:
directions = [direction]
for direction in directions:
# Warning: this shape-ID-making logic needs to match that
# in ``build_shapes``
shid = '{}{}{}'.format(shape, cs.SEP, direction)
rows.extend([[
route,
cs.SEP.join(['t', route, window, start,
str(direction), str(i)]),
direction,
shid,
service
] for i in range(num_trips_per_direction)])
return pd.DataFrame(rows, columns=['route_id', 'trip_id', 'direction_id',
'shape_id', 'service_id'])
def buffer_side(linestring, side, buffer):
"""
Given a Shapely LineString, a side of the LineString
(string; 'left' = left hand side of LineString,
'right' = right hand side of LineString, or
'both' = both sides), and a buffer size in the distance units of
the LineString, buffer the LineString on the given side by
the buffer size and return the resulting Shapely polygon.
"""
b = linestring.buffer(buffer, cap_style=2)
if side in ['left', 'right'] and buffer > 0:
# Make a tiny buffer to split the normal-size buffer
# in half across the linestring
eps = min(buffer/2, 0.001)
b0 = linestring.buffer(eps, cap_style=3)
diff = b.difference(b0)
polys = so.polygonize(diff)
# Buffer sides slightly to include original linestring
if side == 'left':
b = list(polys)[0].buffer(1.1*eps)
else:
b = list(polys)[-1].buffer(1.1*eps)
return b
def get_nearby_stops(geo_stops, linestring, side, buffer=cs.BUFFER):
"""
Given a GeoDataFrame of stops, a Shapely LineString in the
same coordinate system, a side of the LineString
(string; 'left' = left hand side of LineString,
'right' = right hand side of LineString, or
'both' = both sides), and a buffer in the distance units of that
coordinate system, do the following.
Return a GeoDataFrame of all the stops that lie within
``buffer`` distance units to the ``side`` of the LineString.
"""
b = buffer_side(linestring, side, buffer)
# Collect stops
return geo_stops.loc[geo_stops.intersects(b)].copy()
def build_stop_times(pfeed, routes, shapes, stops, trips, buffer=cs.BUFFER):
"""
Given a ProtoFeed and its corresponding routes (DataFrame),
shapes (DataFrame), stops (DataFrame), trips (DataFrame),
return DataFrame representing ``stop_times.txt``.
Includes the optional ``shape_dist_traveled`` column.
Don't make stop times for trips with no nearby stops.
"""
# Get the table of trips and add frequency and service window details
routes = (
routes
.filter(['route_id', 'route_short_name'])
.merge(pfeed.frequencies.drop(['shape_id'], axis=1))
)
trips = (
trips
.assign(service_window_id=lambda x: x.trip_id.map(
lambda y: y.split(cs.SEP)[2]))
.merge(routes)
)
# Get the geometries of ``shapes`` and not ``pfeed.shapes``
geometry_by_shape = dict(
gt.geometrize_shapes(shapes, use_utm=True)
.filter(['shape_id', 'geometry'])
.values
)
# Save on distance computations by memoizing
dist_by_stop_by_shape = {shape: {} for shape in geometry_by_shape}
def compute_stops_dists_times(geo_stops, linestring, shape,
start_time, end_time):
"""
Given a GeoDataFrame of stops on one side of a given Shapely
LineString with given shape ID, compute distances and departure
times of a trip traversing the LineString from start to end
at the given start and end times (in seconds past midnight)
and stopping at the stops encountered along the way.
Do not assume that the stops are ordered by trip encounter.
Return three lists of the same length: the stop IDs in order
that the trip encounters them, the shape distances traveled
along distances at the stops, and the times the stops are
encountered, respectively.
"""
g = geo_stops.copy()
dists_and_stops = []
for i, stop in enumerate(g['stop_id'].values):
if stop in dist_by_stop_by_shape[shape]:
d = dist_by_stop_by_shape[shape][stop]
else:
d = gt.get_segment_length(linestring,
g.geometry.iat[i])/1000 # km
dist_by_stop_by_shape[shape][stop] = d
dists_and_stops.append((d, stop))
dists, stops = zip(*sorted(dists_and_stops))
D = linestring.length/1000
dists_are_reasonable = all([d < D + 100 for d in dists])
if not dists_are_reasonable:
# Assume equal distances between stops :-(
n = len(stops)
delta = D/(n - 1)
dists = [i*delta for i in range(n)]
# Compute times using distances, start and end stop times,
# and linear interpolation
t0, t1 = start_time, end_time
d0, d1 = dists[0], dists[-1]
# Interpolate
times = np.interp(dists, [d0, d1], [t0, t1])
return stops, dists, times
# Iterate through trips and set stop times based on stop ID
# and service window frequency.
# Remember that every trip has a valid shape ID.
# Gather stops geographically from ``stops``.
rows = []
geo_stops = gt.geometrize_stops(stops, use_utm=True)
# Look on the side of the traffic side of street for this timezone
side = cs.traffic_by_timezone[pfeed.meta.agency_timezone.iat[0]]
for index, row in trips.iterrows():
shape = row['shape_id']
geom = geometry_by_shape[shape]
stops = get_nearby_stops(geo_stops, geom, side, buffer=buffer)
# Don't make stop times for trips without nearby stops
if stops.empty:
continue
length = geom.length/1000 # km
speed = row['speed'] # km/h
duration = int((length/speed)*3600) # seconds
frequency = row['frequency']
if not frequency:
# No stop times for this trip/frequency combo
continue
headway = 3600/frequency # seconds
trip = row['trip_id']
__, route, window, base_timestr, direction, i = (
trip.split(cs.SEP))
direction = int(direction)
base_time = gt.timestr_to_seconds(base_timestr)
start_time = base_time + headway*int(i)
end_time = start_time + duration
stops, dists, times = compute_stops_dists_times(stops, geom, shape,
start_time, end_time)
new_rows = [[trip, stop, j, time, time, dist]
for j, (stop, time, dist) in enumerate(zip(stops, times, dists))]
rows.extend(new_rows)
g = pd.DataFrame(rows, columns=['trip_id', 'stop_id', 'stop_sequence',
'arrival_time', 'departure_time', 'shape_dist_traveled'])
# Convert seconds back to time strings
g[['arrival_time', 'departure_time']] =\
g[['arrival_time', 'departure_time']].applymap(
lambda x: gt.timestr_to_seconds(x, inverse=True))
return g
def build_feed(pfeed, buffer=cs.BUFFER):
# Create Feed tables
agency = build_agency(pfeed)
calendar, service_by_window = build_calendar_etc(pfeed)
routes = build_routes(pfeed)
shapes = build_shapes(pfeed)
stops = build_stops(pfeed, shapes)
trips = build_trips(pfeed, routes, service_by_window)
stop_times = build_stop_times(pfeed, routes, shapes, stops, trips,
buffer=buffer)
# Be tidy and remove unused stops
stops = stops[stops.stop_id.isin(stop_times.stop_id)].copy()
# Create Feed
return gt.Feed(agency=agency, calendar=calendar, routes=routes,
shapes=shapes, stops=stops, stop_times=stop_times, trips=trips,
dist_units='km') |
mrcagney/make_gtfs | make_gtfs/main.py | build_calendar_etc | python | def build_calendar_etc(pfeed):
windows = pfeed.service_windows.copy()
# Create a service ID for each distinct days_active field and map the
# service windows to those service IDs
def get_sid(bitlist):
return 'srv' + ''.join([str(b) for b in bitlist])
weekdays = ['monday', 'tuesday', 'wednesday', 'thursday', 'friday',
'saturday', 'sunday']
bitlists = set()
# Create a dictionary <service window ID> -> <service ID>
d = dict()
for index, window in windows.iterrows():
bitlist = window[weekdays].tolist()
d[window['service_window_id']] = get_sid(bitlist)
bitlists.add(tuple(bitlist))
service_by_window = d
# Create calendar
start_date = pfeed.meta['start_date'].iat[0]
end_date = pfeed.meta['end_date'].iat[0]
F = []
for bitlist in bitlists:
F.append([get_sid(bitlist)] + list(bitlist) +
[start_date, end_date])
calendar = pd.DataFrame(F, columns=(
['service_id'] + weekdays + ['start_date', 'end_date']))
return calendar, service_by_window | Given a ProtoFeed, return a DataFrame representing ``calendar.txt``
and a dictionary of the form <service window ID> -> <service ID>,
respectively. | train | https://github.com/mrcagney/make_gtfs/blob/37b6f88e03bac708c2e85d6f4b6d48a0c92e4a59/make_gtfs/main.py#L55-L90 | [
"def get_sid(bitlist):\n return 'srv' + ''.join([str(b) for b in bitlist])\n"
] | import pandas as pd
import numpy as np
import shapely.ops as so
import shapely.geometry as sg
import gtfstk as gt
from . import constants as cs
def get_duration(timestr1, timestr2, units='s'):
"""
Return the duration of the time period between the first and second
time string in the given units.
Allowable units are 's' (seconds), 'min' (minutes), 'h' (hours).
Assume ``timestr1 < timestr2``.
"""
valid_units = ['s', 'min', 'h']
assert units in valid_units,\
"Units must be one of {!s}".format(valid_units)
duration = (
gt.timestr_to_seconds(timestr2) - gt.timestr_to_seconds(timestr1)
)
if units == 's':
return duration
elif units == 'min':
return duration/60
else:
return duration/3600
def build_stop_ids(shape_id):
"""
Create a pair of stop IDs based on the given shape ID.
"""
return [cs.SEP.join(['stp', shape_id, str(i)]) for i in range(2)]
def build_stop_names(shape_id):
"""
Create a pair of stop names based on the given shape ID.
"""
return ['Stop {!s} on shape {!s} '.format(i, shape_id)
for i in range(2)]
def build_agency(pfeed):
"""
Given a ProtoFeed, return a DataFrame representing ``agency.txt``
"""
return pd.DataFrame({
'agency_name': pfeed.meta['agency_name'].iat[0],
'agency_url': pfeed.meta['agency_url'].iat[0],
'agency_timezone': pfeed.meta['agency_timezone'].iat[0],
}, index=[0])
def build_routes(pfeed):
"""
Given a ProtoFeed, return a DataFrame representing ``routes.txt``.
"""
f = pfeed.frequencies[['route_short_name', 'route_long_name',
'route_type', 'shape_id']].drop_duplicates().copy()
# Create route IDs
f['route_id'] = 'r' + f['route_short_name'].map(str)
del f['shape_id']
return f
def build_shapes(pfeed):
"""
Given a ProtoFeed, return DataFrame representing ``shapes.txt``.
Only use shape IDs that occur in both ``pfeed.shapes`` and
``pfeed.frequencies``.
Create reversed shapes where routes traverse shapes in both
directions.
"""
rows = []
for shape, geom in pfeed.shapes[['shape_id',
'geometry']].itertuples(index=False):
if shape not in pfeed.shapes_extra:
continue
if pfeed.shapes_extra[shape] == 2:
# Add shape and its reverse
shid = shape + '-1'
new_rows = [[shid, i, lon, lat]
for i, (lon, lat) in enumerate(geom.coords)]
rows.extend(new_rows)
shid = shape + '-0'
new_rows = [[shid, i, lon, lat]
for i, (lon, lat) in enumerate(reversed(geom.coords))]
rows.extend(new_rows)
else:
# Add shape
shid = '{}{}{}'.format(shape, cs.SEP, pfeed.shapes_extra[shape])
new_rows = [[shid, i, lon, lat]
for i, (lon, lat) in enumerate(geom.coords)]
rows.extend(new_rows)
return pd.DataFrame(rows, columns=['shape_id', 'shape_pt_sequence',
'shape_pt_lon', 'shape_pt_lat'])
def build_stops(pfeed, shapes=None):
"""
Given a ProtoFeed, return a DataFrame representing ``stops.txt``.
If ``pfeed.stops`` is not ``None``, then return that.
Otherwise, require built shapes output by :func:`build_shapes`,
create one stop at the beginning (the first point) of each shape
and one at the end (the last point) of each shape,
and drop stops with duplicate coordinates.
Note that this will yield one stop for shapes that are loops.
"""
if pfeed.stops is not None:
stops = pfeed.stops.copy()
else:
if shapes is None:
raise ValueError('Must input shapes built by build_shapes()')
geo_shapes = gt.geometrize_shapes(shapes)
rows = []
for shape, geom in geo_shapes[['shape_id',
'geometry']].itertuples(index=False):
stop_ids = build_stop_ids(shape)
stop_names = build_stop_names(shape)
for i in range(2):
stop_id = stop_ids[i]
stop_name = stop_names[i]
stop_lon, stop_lat = geom.interpolate(i,
normalized=True).coords[0]
rows.append([stop_id, stop_name, stop_lon, stop_lat])
stops = (
pd.DataFrame(rows, columns=['stop_id', 'stop_name',
'stop_lon', 'stop_lat'])
.drop_duplicates(subset=['stop_lon', 'stop_lat'])
)
return stops
def build_trips(pfeed, routes, service_by_window):
"""
Given a ProtoFeed and its corresponding routes (DataFrame),
service-by-window (dictionary), return a DataFrame representing
``trips.txt``.
Trip IDs encode route, direction, and service window information
to make it easy to compute stop times later.
"""
# Put together the route and service data
routes = pd.merge(routes[['route_id', 'route_short_name']],
pfeed.frequencies)
routes = pd.merge(routes, pfeed.service_windows)
# For each row in routes, add trips at the specified frequency in
# the specified direction
rows = []
for index, row in routes.iterrows():
shape = row['shape_id']
route = row['route_id']
window = row['service_window_id']
start, end = row[['start_time', 'end_time']].values
duration = get_duration(start, end, 'h')
frequency = row['frequency']
if not frequency:
# No trips during this service window
continue
# Rounding down occurs here if the duration isn't integral
# (bad input)
num_trips_per_direction = int(frequency*duration)
service = service_by_window[window]
direction = row['direction']
if direction == 2:
directions = [0, 1]
else:
directions = [direction]
for direction in directions:
# Warning: this shape-ID-making logic needs to match that
# in ``build_shapes``
shid = '{}{}{}'.format(shape, cs.SEP, direction)
rows.extend([[
route,
cs.SEP.join(['t', route, window, start,
str(direction), str(i)]),
direction,
shid,
service
] for i in range(num_trips_per_direction)])
return pd.DataFrame(rows, columns=['route_id', 'trip_id', 'direction_id',
'shape_id', 'service_id'])
def buffer_side(linestring, side, buffer):
"""
Given a Shapely LineString, a side of the LineString
(string; 'left' = left hand side of LineString,
'right' = right hand side of LineString, or
'both' = both sides), and a buffer size in the distance units of
the LineString, buffer the LineString on the given side by
the buffer size and return the resulting Shapely polygon.
"""
b = linestring.buffer(buffer, cap_style=2)
if side in ['left', 'right'] and buffer > 0:
# Make a tiny buffer to split the normal-size buffer
# in half across the linestring
eps = min(buffer/2, 0.001)
b0 = linestring.buffer(eps, cap_style=3)
diff = b.difference(b0)
polys = so.polygonize(diff)
# Buffer sides slightly to include original linestring
if side == 'left':
b = list(polys)[0].buffer(1.1*eps)
else:
b = list(polys)[-1].buffer(1.1*eps)
return b
def get_nearby_stops(geo_stops, linestring, side, buffer=cs.BUFFER):
"""
Given a GeoDataFrame of stops, a Shapely LineString in the
same coordinate system, a side of the LineString
(string; 'left' = left hand side of LineString,
'right' = right hand side of LineString, or
'both' = both sides), and a buffer in the distance units of that
coordinate system, do the following.
Return a GeoDataFrame of all the stops that lie within
``buffer`` distance units to the ``side`` of the LineString.
"""
b = buffer_side(linestring, side, buffer)
# Collect stops
return geo_stops.loc[geo_stops.intersects(b)].copy()
def build_stop_times(pfeed, routes, shapes, stops, trips, buffer=cs.BUFFER):
"""
Given a ProtoFeed and its corresponding routes (DataFrame),
shapes (DataFrame), stops (DataFrame), trips (DataFrame),
return DataFrame representing ``stop_times.txt``.
Includes the optional ``shape_dist_traveled`` column.
Don't make stop times for trips with no nearby stops.
"""
# Get the table of trips and add frequency and service window details
routes = (
routes
.filter(['route_id', 'route_short_name'])
.merge(pfeed.frequencies.drop(['shape_id'], axis=1))
)
trips = (
trips
.assign(service_window_id=lambda x: x.trip_id.map(
lambda y: y.split(cs.SEP)[2]))
.merge(routes)
)
# Get the geometries of ``shapes`` and not ``pfeed.shapes``
geometry_by_shape = dict(
gt.geometrize_shapes(shapes, use_utm=True)
.filter(['shape_id', 'geometry'])
.values
)
# Save on distance computations by memoizing
dist_by_stop_by_shape = {shape: {} for shape in geometry_by_shape}
def compute_stops_dists_times(geo_stops, linestring, shape,
start_time, end_time):
"""
Given a GeoDataFrame of stops on one side of a given Shapely
LineString with given shape ID, compute distances and departure
times of a trip traversing the LineString from start to end
at the given start and end times (in seconds past midnight)
and stopping at the stops encountered along the way.
Do not assume that the stops are ordered by trip encounter.
Return three lists of the same length: the stop IDs in order
that the trip encounters them, the shape distances traveled
along distances at the stops, and the times the stops are
encountered, respectively.
"""
g = geo_stops.copy()
dists_and_stops = []
for i, stop in enumerate(g['stop_id'].values):
if stop in dist_by_stop_by_shape[shape]:
d = dist_by_stop_by_shape[shape][stop]
else:
d = gt.get_segment_length(linestring,
g.geometry.iat[i])/1000 # km
dist_by_stop_by_shape[shape][stop] = d
dists_and_stops.append((d, stop))
dists, stops = zip(*sorted(dists_and_stops))
D = linestring.length/1000
dists_are_reasonable = all([d < D + 100 for d in dists])
if not dists_are_reasonable:
# Assume equal distances between stops :-(
n = len(stops)
delta = D/(n - 1)
dists = [i*delta for i in range(n)]
# Compute times using distances, start and end stop times,
# and linear interpolation
t0, t1 = start_time, end_time
d0, d1 = dists[0], dists[-1]
# Interpolate
times = np.interp(dists, [d0, d1], [t0, t1])
return stops, dists, times
# Iterate through trips and set stop times based on stop ID
# and service window frequency.
# Remember that every trip has a valid shape ID.
# Gather stops geographically from ``stops``.
rows = []
geo_stops = gt.geometrize_stops(stops, use_utm=True)
# Look on the side of the traffic side of street for this timezone
side = cs.traffic_by_timezone[pfeed.meta.agency_timezone.iat[0]]
for index, row in trips.iterrows():
shape = row['shape_id']
geom = geometry_by_shape[shape]
stops = get_nearby_stops(geo_stops, geom, side, buffer=buffer)
# Don't make stop times for trips without nearby stops
if stops.empty:
continue
length = geom.length/1000 # km
speed = row['speed'] # km/h
duration = int((length/speed)*3600) # seconds
frequency = row['frequency']
if not frequency:
# No stop times for this trip/frequency combo
continue
headway = 3600/frequency # seconds
trip = row['trip_id']
__, route, window, base_timestr, direction, i = (
trip.split(cs.SEP))
direction = int(direction)
base_time = gt.timestr_to_seconds(base_timestr)
start_time = base_time + headway*int(i)
end_time = start_time + duration
stops, dists, times = compute_stops_dists_times(stops, geom, shape,
start_time, end_time)
new_rows = [[trip, stop, j, time, time, dist]
for j, (stop, time, dist) in enumerate(zip(stops, times, dists))]
rows.extend(new_rows)
g = pd.DataFrame(rows, columns=['trip_id', 'stop_id', 'stop_sequence',
'arrival_time', 'departure_time', 'shape_dist_traveled'])
# Convert seconds back to time strings
g[['arrival_time', 'departure_time']] =\
g[['arrival_time', 'departure_time']].applymap(
lambda x: gt.timestr_to_seconds(x, inverse=True))
return g
def build_feed(pfeed, buffer=cs.BUFFER):
# Create Feed tables
agency = build_agency(pfeed)
calendar, service_by_window = build_calendar_etc(pfeed)
routes = build_routes(pfeed)
shapes = build_shapes(pfeed)
stops = build_stops(pfeed, shapes)
trips = build_trips(pfeed, routes, service_by_window)
stop_times = build_stop_times(pfeed, routes, shapes, stops, trips,
buffer=buffer)
# Be tidy and remove unused stops
stops = stops[stops.stop_id.isin(stop_times.stop_id)].copy()
# Create Feed
return gt.Feed(agency=agency, calendar=calendar, routes=routes,
shapes=shapes, stops=stops, stop_times=stop_times, trips=trips,
dist_units='km') |
mrcagney/make_gtfs | make_gtfs/main.py | build_routes | python | def build_routes(pfeed):
f = pfeed.frequencies[['route_short_name', 'route_long_name',
'route_type', 'shape_id']].drop_duplicates().copy()
# Create route IDs
f['route_id'] = 'r' + f['route_short_name'].map(str)
del f['shape_id']
return f | Given a ProtoFeed, return a DataFrame representing ``routes.txt``. | train | https://github.com/mrcagney/make_gtfs/blob/37b6f88e03bac708c2e85d6f4b6d48a0c92e4a59/make_gtfs/main.py#L92-L104 | null | import pandas as pd
import numpy as np
import shapely.ops as so
import shapely.geometry as sg
import gtfstk as gt
from . import constants as cs
def get_duration(timestr1, timestr2, units='s'):
"""
Return the duration of the time period between the first and second
time string in the given units.
Allowable units are 's' (seconds), 'min' (minutes), 'h' (hours).
Assume ``timestr1 < timestr2``.
"""
valid_units = ['s', 'min', 'h']
assert units in valid_units,\
"Units must be one of {!s}".format(valid_units)
duration = (
gt.timestr_to_seconds(timestr2) - gt.timestr_to_seconds(timestr1)
)
if units == 's':
return duration
elif units == 'min':
return duration/60
else:
return duration/3600
def build_stop_ids(shape_id):
"""
Create a pair of stop IDs based on the given shape ID.
"""
return [cs.SEP.join(['stp', shape_id, str(i)]) for i in range(2)]
def build_stop_names(shape_id):
"""
Create a pair of stop names based on the given shape ID.
"""
return ['Stop {!s} on shape {!s} '.format(i, shape_id)
for i in range(2)]
def build_agency(pfeed):
"""
Given a ProtoFeed, return a DataFrame representing ``agency.txt``
"""
return pd.DataFrame({
'agency_name': pfeed.meta['agency_name'].iat[0],
'agency_url': pfeed.meta['agency_url'].iat[0],
'agency_timezone': pfeed.meta['agency_timezone'].iat[0],
}, index=[0])
def build_calendar_etc(pfeed):
"""
Given a ProtoFeed, return a DataFrame representing ``calendar.txt``
and a dictionary of the form <service window ID> -> <service ID>,
respectively.
"""
windows = pfeed.service_windows.copy()
# Create a service ID for each distinct days_active field and map the
# service windows to those service IDs
def get_sid(bitlist):
return 'srv' + ''.join([str(b) for b in bitlist])
weekdays = ['monday', 'tuesday', 'wednesday', 'thursday', 'friday',
'saturday', 'sunday']
bitlists = set()
# Create a dictionary <service window ID> -> <service ID>
d = dict()
for index, window in windows.iterrows():
bitlist = window[weekdays].tolist()
d[window['service_window_id']] = get_sid(bitlist)
bitlists.add(tuple(bitlist))
service_by_window = d
# Create calendar
start_date = pfeed.meta['start_date'].iat[0]
end_date = pfeed.meta['end_date'].iat[0]
F = []
for bitlist in bitlists:
F.append([get_sid(bitlist)] + list(bitlist) +
[start_date, end_date])
calendar = pd.DataFrame(F, columns=(
['service_id'] + weekdays + ['start_date', 'end_date']))
return calendar, service_by_window
def build_shapes(pfeed):
"""
Given a ProtoFeed, return DataFrame representing ``shapes.txt``.
Only use shape IDs that occur in both ``pfeed.shapes`` and
``pfeed.frequencies``.
Create reversed shapes where routes traverse shapes in both
directions.
"""
rows = []
for shape, geom in pfeed.shapes[['shape_id',
'geometry']].itertuples(index=False):
if shape not in pfeed.shapes_extra:
continue
if pfeed.shapes_extra[shape] == 2:
# Add shape and its reverse
shid = shape + '-1'
new_rows = [[shid, i, lon, lat]
for i, (lon, lat) in enumerate(geom.coords)]
rows.extend(new_rows)
shid = shape + '-0'
new_rows = [[shid, i, lon, lat]
for i, (lon, lat) in enumerate(reversed(geom.coords))]
rows.extend(new_rows)
else:
# Add shape
shid = '{}{}{}'.format(shape, cs.SEP, pfeed.shapes_extra[shape])
new_rows = [[shid, i, lon, lat]
for i, (lon, lat) in enumerate(geom.coords)]
rows.extend(new_rows)
return pd.DataFrame(rows, columns=['shape_id', 'shape_pt_sequence',
'shape_pt_lon', 'shape_pt_lat'])
def build_stops(pfeed, shapes=None):
"""
Given a ProtoFeed, return a DataFrame representing ``stops.txt``.
If ``pfeed.stops`` is not ``None``, then return that.
Otherwise, require built shapes output by :func:`build_shapes`,
create one stop at the beginning (the first point) of each shape
and one at the end (the last point) of each shape,
and drop stops with duplicate coordinates.
Note that this will yield one stop for shapes that are loops.
"""
if pfeed.stops is not None:
stops = pfeed.stops.copy()
else:
if shapes is None:
raise ValueError('Must input shapes built by build_shapes()')
geo_shapes = gt.geometrize_shapes(shapes)
rows = []
for shape, geom in geo_shapes[['shape_id',
'geometry']].itertuples(index=False):
stop_ids = build_stop_ids(shape)
stop_names = build_stop_names(shape)
for i in range(2):
stop_id = stop_ids[i]
stop_name = stop_names[i]
stop_lon, stop_lat = geom.interpolate(i,
normalized=True).coords[0]
rows.append([stop_id, stop_name, stop_lon, stop_lat])
stops = (
pd.DataFrame(rows, columns=['stop_id', 'stop_name',
'stop_lon', 'stop_lat'])
.drop_duplicates(subset=['stop_lon', 'stop_lat'])
)
return stops
def build_trips(pfeed, routes, service_by_window):
"""
Given a ProtoFeed and its corresponding routes (DataFrame),
service-by-window (dictionary), return a DataFrame representing
``trips.txt``.
Trip IDs encode route, direction, and service window information
to make it easy to compute stop times later.
"""
# Put together the route and service data
routes = pd.merge(routes[['route_id', 'route_short_name']],
pfeed.frequencies)
routes = pd.merge(routes, pfeed.service_windows)
# For each row in routes, add trips at the specified frequency in
# the specified direction
rows = []
for index, row in routes.iterrows():
shape = row['shape_id']
route = row['route_id']
window = row['service_window_id']
start, end = row[['start_time', 'end_time']].values
duration = get_duration(start, end, 'h')
frequency = row['frequency']
if not frequency:
# No trips during this service window
continue
# Rounding down occurs here if the duration isn't integral
# (bad input)
num_trips_per_direction = int(frequency*duration)
service = service_by_window[window]
direction = row['direction']
if direction == 2:
directions = [0, 1]
else:
directions = [direction]
for direction in directions:
# Warning: this shape-ID-making logic needs to match that
# in ``build_shapes``
shid = '{}{}{}'.format(shape, cs.SEP, direction)
rows.extend([[
route,
cs.SEP.join(['t', route, window, start,
str(direction), str(i)]),
direction,
shid,
service
] for i in range(num_trips_per_direction)])
return pd.DataFrame(rows, columns=['route_id', 'trip_id', 'direction_id',
'shape_id', 'service_id'])
def buffer_side(linestring, side, buffer):
"""
Given a Shapely LineString, a side of the LineString
(string; 'left' = left hand side of LineString,
'right' = right hand side of LineString, or
'both' = both sides), and a buffer size in the distance units of
the LineString, buffer the LineString on the given side by
the buffer size and return the resulting Shapely polygon.
"""
b = linestring.buffer(buffer, cap_style=2)
if side in ['left', 'right'] and buffer > 0:
# Make a tiny buffer to split the normal-size buffer
# in half across the linestring
eps = min(buffer/2, 0.001)
b0 = linestring.buffer(eps, cap_style=3)
diff = b.difference(b0)
polys = so.polygonize(diff)
# Buffer sides slightly to include original linestring
if side == 'left':
b = list(polys)[0].buffer(1.1*eps)
else:
b = list(polys)[-1].buffer(1.1*eps)
return b
def get_nearby_stops(geo_stops, linestring, side, buffer=cs.BUFFER):
"""
Given a GeoDataFrame of stops, a Shapely LineString in the
same coordinate system, a side of the LineString
(string; 'left' = left hand side of LineString,
'right' = right hand side of LineString, or
'both' = both sides), and a buffer in the distance units of that
coordinate system, do the following.
Return a GeoDataFrame of all the stops that lie within
``buffer`` distance units to the ``side`` of the LineString.
"""
b = buffer_side(linestring, side, buffer)
# Collect stops
return geo_stops.loc[geo_stops.intersects(b)].copy()
def build_stop_times(pfeed, routes, shapes, stops, trips, buffer=cs.BUFFER):
"""
Given a ProtoFeed and its corresponding routes (DataFrame),
shapes (DataFrame), stops (DataFrame), trips (DataFrame),
return DataFrame representing ``stop_times.txt``.
Includes the optional ``shape_dist_traveled`` column.
Don't make stop times for trips with no nearby stops.
"""
# Get the table of trips and add frequency and service window details
routes = (
routes
.filter(['route_id', 'route_short_name'])
.merge(pfeed.frequencies.drop(['shape_id'], axis=1))
)
trips = (
trips
.assign(service_window_id=lambda x: x.trip_id.map(
lambda y: y.split(cs.SEP)[2]))
.merge(routes)
)
# Get the geometries of ``shapes`` and not ``pfeed.shapes``
geometry_by_shape = dict(
gt.geometrize_shapes(shapes, use_utm=True)
.filter(['shape_id', 'geometry'])
.values
)
# Save on distance computations by memoizing
dist_by_stop_by_shape = {shape: {} for shape in geometry_by_shape}
def compute_stops_dists_times(geo_stops, linestring, shape,
start_time, end_time):
"""
Given a GeoDataFrame of stops on one side of a given Shapely
LineString with given shape ID, compute distances and departure
times of a trip traversing the LineString from start to end
at the given start and end times (in seconds past midnight)
and stopping at the stops encountered along the way.
Do not assume that the stops are ordered by trip encounter.
Return three lists of the same length: the stop IDs in order
that the trip encounters them, the shape distances traveled
along distances at the stops, and the times the stops are
encountered, respectively.
"""
g = geo_stops.copy()
dists_and_stops = []
for i, stop in enumerate(g['stop_id'].values):
if stop in dist_by_stop_by_shape[shape]:
d = dist_by_stop_by_shape[shape][stop]
else:
d = gt.get_segment_length(linestring,
g.geometry.iat[i])/1000 # km
dist_by_stop_by_shape[shape][stop] = d
dists_and_stops.append((d, stop))
dists, stops = zip(*sorted(dists_and_stops))
D = linestring.length/1000
dists_are_reasonable = all([d < D + 100 for d in dists])
if not dists_are_reasonable:
# Assume equal distances between stops :-(
n = len(stops)
delta = D/(n - 1)
dists = [i*delta for i in range(n)]
# Compute times using distances, start and end stop times,
# and linear interpolation
t0, t1 = start_time, end_time
d0, d1 = dists[0], dists[-1]
# Interpolate
times = np.interp(dists, [d0, d1], [t0, t1])
return stops, dists, times
# Iterate through trips and set stop times based on stop ID
# and service window frequency.
# Remember that every trip has a valid shape ID.
# Gather stops geographically from ``stops``.
rows = []
geo_stops = gt.geometrize_stops(stops, use_utm=True)
# Look on the side of the traffic side of street for this timezone
side = cs.traffic_by_timezone[pfeed.meta.agency_timezone.iat[0]]
for index, row in trips.iterrows():
shape = row['shape_id']
geom = geometry_by_shape[shape]
stops = get_nearby_stops(geo_stops, geom, side, buffer=buffer)
# Don't make stop times for trips without nearby stops
if stops.empty:
continue
length = geom.length/1000 # km
speed = row['speed'] # km/h
duration = int((length/speed)*3600) # seconds
frequency = row['frequency']
if not frequency:
# No stop times for this trip/frequency combo
continue
headway = 3600/frequency # seconds
trip = row['trip_id']
__, route, window, base_timestr, direction, i = (
trip.split(cs.SEP))
direction = int(direction)
base_time = gt.timestr_to_seconds(base_timestr)
start_time = base_time + headway*int(i)
end_time = start_time + duration
stops, dists, times = compute_stops_dists_times(stops, geom, shape,
start_time, end_time)
new_rows = [[trip, stop, j, time, time, dist]
for j, (stop, time, dist) in enumerate(zip(stops, times, dists))]
rows.extend(new_rows)
g = pd.DataFrame(rows, columns=['trip_id', 'stop_id', 'stop_sequence',
'arrival_time', 'departure_time', 'shape_dist_traveled'])
# Convert seconds back to time strings
g[['arrival_time', 'departure_time']] =\
g[['arrival_time', 'departure_time']].applymap(
lambda x: gt.timestr_to_seconds(x, inverse=True))
return g
def build_feed(pfeed, buffer=cs.BUFFER):
# Create Feed tables
agency = build_agency(pfeed)
calendar, service_by_window = build_calendar_etc(pfeed)
routes = build_routes(pfeed)
shapes = build_shapes(pfeed)
stops = build_stops(pfeed, shapes)
trips = build_trips(pfeed, routes, service_by_window)
stop_times = build_stop_times(pfeed, routes, shapes, stops, trips,
buffer=buffer)
# Be tidy and remove unused stops
stops = stops[stops.stop_id.isin(stop_times.stop_id)].copy()
# Create Feed
return gt.Feed(agency=agency, calendar=calendar, routes=routes,
shapes=shapes, stops=stops, stop_times=stop_times, trips=trips,
dist_units='km') |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.