repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
tchellomello/raincloudy | raincloudy/faucet.py | RainCloudyFaucetZone._set_rain_delay | python | def _set_rain_delay(self, zoneid, value):
# current index for rain_delay starts in 0
zoneid -= 1
if isinstance(value, int):
if value > MAX_RAIN_DELAY_DAYS or value < 0:
return None
elif value == 0:
value = 'off'
elif value == 1:
value = '1day'
elif value >= 2:
value = str(value) + 'days'
elif isinstance(value, str):
if value.lower() != 'off':
return None
ddata = self.preupdate()
attr = 'zone{}_rain_delay_select'.format(zoneid)
ddata[attr] = value
self.submit_action(ddata)
return True | Generic method to set auto_watering program. | train | https://github.com/tchellomello/raincloudy/blob/1847fa913e5ba79645d51bf23637860d68c67dbf/raincloudy/faucet.py#L250-L272 | [
"def preupdate(self, force_refresh=True):\n \"\"\"Return a dict with all current options prior submitting request.\"\"\"\n ddata = MANUAL_OP_DATA.copy()\n\n # force update to make sure status is accurate\n if force_refresh:\n self.update()\n\n # select current controller and faucet\n ddata[... | class RainCloudyFaucetZone(RainCloudyFaucetCore):
"""RainCloudyFaucetZone object."""
# pylint: disable=super-init-not-called
# needs review later
def __init__(self, parent, controller, faucet, zone_id):
"""
Initialize RainCloudy Controller object.
:param parent: RainCloudy object
:param controller: RainCloudy Controller parent object
:param faucet: faucet assigned controller
:param zone_id: zone ID assigned controller
:type parent: RainCloudy object
:type controller: RainCloudyControler object
:type faucet: RainCloudyFaucet object
:type zone_id: integer
:return: RainCloudyFaucet object
:rtype: RainCloudyFaucet object
"""
self._parent = parent
self._controller = controller
self._faucet = faucet
self._id = zone_id
def __repr__(self):
"""Object representation."""
try:
return "<{0}: {1}>".format(self.__class__.__name__, self.name)
except AttributeError:
return "<{0}: {1}>".format(self.__class__.__name__, self.id)
def _set_zone_name(self, zoneid, name):
"""Private method to override zone name."""
# zone starts with index 0
zoneid -= 1
data = {
'_set_zone_name': 'Set Name',
'select_zone': str(zoneid),
'zone_name': name,
}
self._controller.post(data)
@property
def name(self):
"""Return zone name."""
return find_zone_name(self._parent.html['home'], self.id)
@name.setter
def name(self, value):
"""Set a new zone name to faucet."""
self._set_zone_name(self.id, value)
def _set_watering_time(self, zoneid, value):
"""Private method to set watering_time per zone."""
if value not in MANUAL_WATERING_ALLOWED:
raise ValueError(
'Valid options are: {}'.format(
', '.join(map(str, MANUAL_WATERING_ALLOWED)))
)
if isinstance(value, int) and value == 0:
value = 'OFF'
elif isinstance(value, str):
value = value.upper()
if value == 'ON':
value = MAX_WATERING_MINUTES
ddata = self.preupdate()
attr = 'zone{}_select_manual_mode'.format(zoneid)
ddata[attr] = value
self.submit_action(ddata)
# TODO in a future release we should break this out. artifact of old API
@property
def watering_time(self):
"""Return watering_time from zone."""
# zone starts with index 0
index = self.id - 1
auto_watering_time =\
self._attributes['rain_delay_mode'][index]['auto_watering_time']
manual_watering_time =\
self._attributes['rain_delay_mode'][index]['manual_watering_time']
if auto_watering_time > manual_watering_time:
watering_time = auto_watering_time
else:
watering_time = manual_watering_time
return watering_time
@watering_time.setter
def watering_time(self, value):
"""Manually turn on water for X minutes."""
return self._set_watering_time(self.id, value)
@property
def droplet(self):
return None
@property
def rain_delay(self):
"""Return the rain delay day from zone."""
index = self.id - 1
return self._attributes['rain_delay_mode'][index]['rain_delay_mode']
@rain_delay.setter
def rain_delay(self, value):
"""Set number of rain delay days for zone."""
return self._set_rain_delay(self.id, value)
@property
def next_cycle(self):
"""Return the time scheduled for next watering from zone."""
index = self.id - 1
return self._attributes['rain_delay_mode'][index]['next_water_cycle']
def _set_auto_watering(self, zoneid, value):
"""Private method to set auto_watering program."""
if not isinstance(value, bool):
return None
ddata = self.preupdate()
attr = 'zone{}_program_toggle'.format(zoneid)
try:
if not value:
ddata.pop(attr)
else:
ddata[attr] = 'on'
except KeyError:
pass
self.submit_action(ddata)
return True
@property
def auto_watering(self):
"""Return if zone is configured to automatic watering."""
value = "zone{}".format(self.id)
return find_program_status(self._parent.html['home'], value)
@auto_watering.setter
def auto_watering(self, value):
"""Enable/disable zone auto_watering program."""
return self._set_auto_watering(self.id, bool(value))
@property
def is_watering(self):
"""Return boolean if zone is watering."""
return bool(self.watering_time > 0)
def _to_dict(self):
"""Method to build zone dict."""
return {
'auto_watering':
getattr(self, "auto_watering"),
'droplet':
getattr(self, "droplet"),
'is_watering':
getattr(self, "is_watering"),
'name':
getattr(self, "name"),
'next_cycle':
getattr(self, "next_cycle"),
'rain_delay':
getattr(self, "rain_delay"),
'watering_time':
getattr(self, "watering_time"),
}
def report(self):
"""Return status from zone."""
return self._to_dict()
def preupdate(self, force_refresh=True):
"""Return a dict with all current options prior submitting request."""
ddata = MANUAL_OP_DATA.copy()
# force update to make sure status is accurate
if force_refresh:
self.update()
# select current controller and faucet
ddata['select_controller'] = \
self._parent.controllers.index(self._controller)
ddata['select_faucet'] = \
self._controller.faucets.index(self._faucet)
# check if zone is scheduled automatically (zone1_program_toggle)
# only add zoneX_program_toogle to ddata when needed,
# otherwise the field will be always on
for zone in self._faucet.zones:
attr = 'zone{}_program_toggle'.format(zone.id)
if zone.auto_watering:
ddata[attr] = 'on'
# check if zone current watering manually (zone1_select_manual_mode)
for zone in self._faucet.zones:
attr = 'zone{}_select_manual_mode'.format(zone.id)
if zone.watering_time and attr in ddata.keys():
ddata[attr] = zone.watering_time
# check if rain delay is selected (zone0_rain_delay_select)
for zone in self._faucet.zones:
attr = 'zone{}_rain_delay_select'.format(zone.id - 1)
value = zone.rain_delay
if value and attr in ddata.keys():
if int(value) >= 2 and int(value) <= 7:
value = str(value) + 'days'
else:
value = str(value) + 'day'
ddata[attr] = value
return ddata
def submit_action(self, ddata):
"""Post data."""
self._controller.post(ddata,
url=HOME_ENDPOINT,
referer=HOME_ENDPOINT)
|
tchellomello/raincloudy | raincloudy/faucet.py | RainCloudyFaucetZone._set_auto_watering | python | def _set_auto_watering(self, zoneid, value):
if not isinstance(value, bool):
return None
ddata = self.preupdate()
attr = 'zone{}_program_toggle'.format(zoneid)
try:
if not value:
ddata.pop(attr)
else:
ddata[attr] = 'on'
except KeyError:
pass
self.submit_action(ddata)
return True | Private method to set auto_watering program. | train | https://github.com/tchellomello/raincloudy/blob/1847fa913e5ba79645d51bf23637860d68c67dbf/raincloudy/faucet.py#L291-L306 | [
"def preupdate(self, force_refresh=True):\n \"\"\"Return a dict with all current options prior submitting request.\"\"\"\n ddata = MANUAL_OP_DATA.copy()\n\n # force update to make sure status is accurate\n if force_refresh:\n self.update()\n\n # select current controller and faucet\n ddata[... | class RainCloudyFaucetZone(RainCloudyFaucetCore):
"""RainCloudyFaucetZone object."""
# pylint: disable=super-init-not-called
# needs review later
def __init__(self, parent, controller, faucet, zone_id):
"""
Initialize RainCloudy Controller object.
:param parent: RainCloudy object
:param controller: RainCloudy Controller parent object
:param faucet: faucet assigned controller
:param zone_id: zone ID assigned controller
:type parent: RainCloudy object
:type controller: RainCloudyControler object
:type faucet: RainCloudyFaucet object
:type zone_id: integer
:return: RainCloudyFaucet object
:rtype: RainCloudyFaucet object
"""
self._parent = parent
self._controller = controller
self._faucet = faucet
self._id = zone_id
def __repr__(self):
"""Object representation."""
try:
return "<{0}: {1}>".format(self.__class__.__name__, self.name)
except AttributeError:
return "<{0}: {1}>".format(self.__class__.__name__, self.id)
def _set_zone_name(self, zoneid, name):
"""Private method to override zone name."""
# zone starts with index 0
zoneid -= 1
data = {
'_set_zone_name': 'Set Name',
'select_zone': str(zoneid),
'zone_name': name,
}
self._controller.post(data)
@property
def name(self):
"""Return zone name."""
return find_zone_name(self._parent.html['home'], self.id)
@name.setter
def name(self, value):
"""Set a new zone name to faucet."""
self._set_zone_name(self.id, value)
def _set_watering_time(self, zoneid, value):
"""Private method to set watering_time per zone."""
if value not in MANUAL_WATERING_ALLOWED:
raise ValueError(
'Valid options are: {}'.format(
', '.join(map(str, MANUAL_WATERING_ALLOWED)))
)
if isinstance(value, int) and value == 0:
value = 'OFF'
elif isinstance(value, str):
value = value.upper()
if value == 'ON':
value = MAX_WATERING_MINUTES
ddata = self.preupdate()
attr = 'zone{}_select_manual_mode'.format(zoneid)
ddata[attr] = value
self.submit_action(ddata)
# TODO in a future release we should break this out. artifact of old API
@property
def watering_time(self):
"""Return watering_time from zone."""
# zone starts with index 0
index = self.id - 1
auto_watering_time =\
self._attributes['rain_delay_mode'][index]['auto_watering_time']
manual_watering_time =\
self._attributes['rain_delay_mode'][index]['manual_watering_time']
if auto_watering_time > manual_watering_time:
watering_time = auto_watering_time
else:
watering_time = manual_watering_time
return watering_time
@watering_time.setter
def watering_time(self, value):
"""Manually turn on water for X minutes."""
return self._set_watering_time(self.id, value)
@property
def droplet(self):
return None
def _set_rain_delay(self, zoneid, value):
"""Generic method to set auto_watering program."""
# current index for rain_delay starts in 0
zoneid -= 1
if isinstance(value, int):
if value > MAX_RAIN_DELAY_DAYS or value < 0:
return None
elif value == 0:
value = 'off'
elif value == 1:
value = '1day'
elif value >= 2:
value = str(value) + 'days'
elif isinstance(value, str):
if value.lower() != 'off':
return None
ddata = self.preupdate()
attr = 'zone{}_rain_delay_select'.format(zoneid)
ddata[attr] = value
self.submit_action(ddata)
return True
@property
def rain_delay(self):
"""Return the rain delay day from zone."""
index = self.id - 1
return self._attributes['rain_delay_mode'][index]['rain_delay_mode']
@rain_delay.setter
def rain_delay(self, value):
"""Set number of rain delay days for zone."""
return self._set_rain_delay(self.id, value)
@property
def next_cycle(self):
"""Return the time scheduled for next watering from zone."""
index = self.id - 1
return self._attributes['rain_delay_mode'][index]['next_water_cycle']
@property
def auto_watering(self):
"""Return if zone is configured to automatic watering."""
value = "zone{}".format(self.id)
return find_program_status(self._parent.html['home'], value)
@auto_watering.setter
def auto_watering(self, value):
"""Enable/disable zone auto_watering program."""
return self._set_auto_watering(self.id, bool(value))
@property
def is_watering(self):
"""Return boolean if zone is watering."""
return bool(self.watering_time > 0)
def _to_dict(self):
"""Method to build zone dict."""
return {
'auto_watering':
getattr(self, "auto_watering"),
'droplet':
getattr(self, "droplet"),
'is_watering':
getattr(self, "is_watering"),
'name':
getattr(self, "name"),
'next_cycle':
getattr(self, "next_cycle"),
'rain_delay':
getattr(self, "rain_delay"),
'watering_time':
getattr(self, "watering_time"),
}
def report(self):
"""Return status from zone."""
return self._to_dict()
def preupdate(self, force_refresh=True):
"""Return a dict with all current options prior submitting request."""
ddata = MANUAL_OP_DATA.copy()
# force update to make sure status is accurate
if force_refresh:
self.update()
# select current controller and faucet
ddata['select_controller'] = \
self._parent.controllers.index(self._controller)
ddata['select_faucet'] = \
self._controller.faucets.index(self._faucet)
# check if zone is scheduled automatically (zone1_program_toggle)
# only add zoneX_program_toogle to ddata when needed,
# otherwise the field will be always on
for zone in self._faucet.zones:
attr = 'zone{}_program_toggle'.format(zone.id)
if zone.auto_watering:
ddata[attr] = 'on'
# check if zone current watering manually (zone1_select_manual_mode)
for zone in self._faucet.zones:
attr = 'zone{}_select_manual_mode'.format(zone.id)
if zone.watering_time and attr in ddata.keys():
ddata[attr] = zone.watering_time
# check if rain delay is selected (zone0_rain_delay_select)
for zone in self._faucet.zones:
attr = 'zone{}_rain_delay_select'.format(zone.id - 1)
value = zone.rain_delay
if value and attr in ddata.keys():
if int(value) >= 2 and int(value) <= 7:
value = str(value) + 'days'
else:
value = str(value) + 'day'
ddata[attr] = value
return ddata
def submit_action(self, ddata):
"""Post data."""
self._controller.post(ddata,
url=HOME_ENDPOINT,
referer=HOME_ENDPOINT)
|
tchellomello/raincloudy | raincloudy/faucet.py | RainCloudyFaucetZone.auto_watering | python | def auto_watering(self):
value = "zone{}".format(self.id)
return find_program_status(self._parent.html['home'], value) | Return if zone is configured to automatic watering. | train | https://github.com/tchellomello/raincloudy/blob/1847fa913e5ba79645d51bf23637860d68c67dbf/raincloudy/faucet.py#L309-L312 | [
"def find_program_status(data, zone):\n \"\"\"\n Find on the HTML document if zoneX has the configuration\n of the auto-schedule/program (auto_watering) enabled.\n\n # expected result if enabled\n #<input checked=\"checked\" class=\"switch\" id=\"id_zone2_program_toggle\" \\\n name=\"zone2_pro... | class RainCloudyFaucetZone(RainCloudyFaucetCore):
"""RainCloudyFaucetZone object."""
# pylint: disable=super-init-not-called
# needs review later
def __init__(self, parent, controller, faucet, zone_id):
"""
Initialize RainCloudy Controller object.
:param parent: RainCloudy object
:param controller: RainCloudy Controller parent object
:param faucet: faucet assigned controller
:param zone_id: zone ID assigned controller
:type parent: RainCloudy object
:type controller: RainCloudyControler object
:type faucet: RainCloudyFaucet object
:type zone_id: integer
:return: RainCloudyFaucet object
:rtype: RainCloudyFaucet object
"""
self._parent = parent
self._controller = controller
self._faucet = faucet
self._id = zone_id
def __repr__(self):
"""Object representation."""
try:
return "<{0}: {1}>".format(self.__class__.__name__, self.name)
except AttributeError:
return "<{0}: {1}>".format(self.__class__.__name__, self.id)
def _set_zone_name(self, zoneid, name):
"""Private method to override zone name."""
# zone starts with index 0
zoneid -= 1
data = {
'_set_zone_name': 'Set Name',
'select_zone': str(zoneid),
'zone_name': name,
}
self._controller.post(data)
@property
def name(self):
"""Return zone name."""
return find_zone_name(self._parent.html['home'], self.id)
@name.setter
def name(self, value):
"""Set a new zone name to faucet."""
self._set_zone_name(self.id, value)
def _set_watering_time(self, zoneid, value):
"""Private method to set watering_time per zone."""
if value not in MANUAL_WATERING_ALLOWED:
raise ValueError(
'Valid options are: {}'.format(
', '.join(map(str, MANUAL_WATERING_ALLOWED)))
)
if isinstance(value, int) and value == 0:
value = 'OFF'
elif isinstance(value, str):
value = value.upper()
if value == 'ON':
value = MAX_WATERING_MINUTES
ddata = self.preupdate()
attr = 'zone{}_select_manual_mode'.format(zoneid)
ddata[attr] = value
self.submit_action(ddata)
# TODO in a future release we should break this out. artifact of old API
@property
def watering_time(self):
"""Return watering_time from zone."""
# zone starts with index 0
index = self.id - 1
auto_watering_time =\
self._attributes['rain_delay_mode'][index]['auto_watering_time']
manual_watering_time =\
self._attributes['rain_delay_mode'][index]['manual_watering_time']
if auto_watering_time > manual_watering_time:
watering_time = auto_watering_time
else:
watering_time = manual_watering_time
return watering_time
@watering_time.setter
def watering_time(self, value):
"""Manually turn on water for X minutes."""
return self._set_watering_time(self.id, value)
@property
def droplet(self):
return None
def _set_rain_delay(self, zoneid, value):
"""Generic method to set auto_watering program."""
# current index for rain_delay starts in 0
zoneid -= 1
if isinstance(value, int):
if value > MAX_RAIN_DELAY_DAYS or value < 0:
return None
elif value == 0:
value = 'off'
elif value == 1:
value = '1day'
elif value >= 2:
value = str(value) + 'days'
elif isinstance(value, str):
if value.lower() != 'off':
return None
ddata = self.preupdate()
attr = 'zone{}_rain_delay_select'.format(zoneid)
ddata[attr] = value
self.submit_action(ddata)
return True
@property
def rain_delay(self):
"""Return the rain delay day from zone."""
index = self.id - 1
return self._attributes['rain_delay_mode'][index]['rain_delay_mode']
@rain_delay.setter
def rain_delay(self, value):
"""Set number of rain delay days for zone."""
return self._set_rain_delay(self.id, value)
@property
def next_cycle(self):
"""Return the time scheduled for next watering from zone."""
index = self.id - 1
return self._attributes['rain_delay_mode'][index]['next_water_cycle']
def _set_auto_watering(self, zoneid, value):
"""Private method to set auto_watering program."""
if not isinstance(value, bool):
return None
ddata = self.preupdate()
attr = 'zone{}_program_toggle'.format(zoneid)
try:
if not value:
ddata.pop(attr)
else:
ddata[attr] = 'on'
except KeyError:
pass
self.submit_action(ddata)
return True
@property
@auto_watering.setter
def auto_watering(self, value):
"""Enable/disable zone auto_watering program."""
return self._set_auto_watering(self.id, bool(value))
@property
def is_watering(self):
"""Return boolean if zone is watering."""
return bool(self.watering_time > 0)
def _to_dict(self):
"""Method to build zone dict."""
return {
'auto_watering':
getattr(self, "auto_watering"),
'droplet':
getattr(self, "droplet"),
'is_watering':
getattr(self, "is_watering"),
'name':
getattr(self, "name"),
'next_cycle':
getattr(self, "next_cycle"),
'rain_delay':
getattr(self, "rain_delay"),
'watering_time':
getattr(self, "watering_time"),
}
def report(self):
"""Return status from zone."""
return self._to_dict()
def preupdate(self, force_refresh=True):
"""Return a dict with all current options prior submitting request."""
ddata = MANUAL_OP_DATA.copy()
# force update to make sure status is accurate
if force_refresh:
self.update()
# select current controller and faucet
ddata['select_controller'] = \
self._parent.controllers.index(self._controller)
ddata['select_faucet'] = \
self._controller.faucets.index(self._faucet)
# check if zone is scheduled automatically (zone1_program_toggle)
# only add zoneX_program_toogle to ddata when needed,
# otherwise the field will be always on
for zone in self._faucet.zones:
attr = 'zone{}_program_toggle'.format(zone.id)
if zone.auto_watering:
ddata[attr] = 'on'
# check if zone current watering manually (zone1_select_manual_mode)
for zone in self._faucet.zones:
attr = 'zone{}_select_manual_mode'.format(zone.id)
if zone.watering_time and attr in ddata.keys():
ddata[attr] = zone.watering_time
# check if rain delay is selected (zone0_rain_delay_select)
for zone in self._faucet.zones:
attr = 'zone{}_rain_delay_select'.format(zone.id - 1)
value = zone.rain_delay
if value and attr in ddata.keys():
if int(value) >= 2 and int(value) <= 7:
value = str(value) + 'days'
else:
value = str(value) + 'day'
ddata[attr] = value
return ddata
def submit_action(self, ddata):
"""Post data."""
self._controller.post(ddata,
url=HOME_ENDPOINT,
referer=HOME_ENDPOINT)
|
tchellomello/raincloudy | raincloudy/faucet.py | RainCloudyFaucetZone._to_dict | python | def _to_dict(self):
return {
'auto_watering':
getattr(self, "auto_watering"),
'droplet':
getattr(self, "droplet"),
'is_watering':
getattr(self, "is_watering"),
'name':
getattr(self, "name"),
'next_cycle':
getattr(self, "next_cycle"),
'rain_delay':
getattr(self, "rain_delay"),
'watering_time':
getattr(self, "watering_time"),
} | Method to build zone dict. | train | https://github.com/tchellomello/raincloudy/blob/1847fa913e5ba79645d51bf23637860d68c67dbf/raincloudy/faucet.py#L324-L341 | null | class RainCloudyFaucetZone(RainCloudyFaucetCore):
"""RainCloudyFaucetZone object."""
# pylint: disable=super-init-not-called
# needs review later
def __init__(self, parent, controller, faucet, zone_id):
"""
Initialize RainCloudy Controller object.
:param parent: RainCloudy object
:param controller: RainCloudy Controller parent object
:param faucet: faucet assigned controller
:param zone_id: zone ID assigned controller
:type parent: RainCloudy object
:type controller: RainCloudyControler object
:type faucet: RainCloudyFaucet object
:type zone_id: integer
:return: RainCloudyFaucet object
:rtype: RainCloudyFaucet object
"""
self._parent = parent
self._controller = controller
self._faucet = faucet
self._id = zone_id
def __repr__(self):
"""Object representation."""
try:
return "<{0}: {1}>".format(self.__class__.__name__, self.name)
except AttributeError:
return "<{0}: {1}>".format(self.__class__.__name__, self.id)
def _set_zone_name(self, zoneid, name):
"""Private method to override zone name."""
# zone starts with index 0
zoneid -= 1
data = {
'_set_zone_name': 'Set Name',
'select_zone': str(zoneid),
'zone_name': name,
}
self._controller.post(data)
@property
def name(self):
"""Return zone name."""
return find_zone_name(self._parent.html['home'], self.id)
@name.setter
def name(self, value):
"""Set a new zone name to faucet."""
self._set_zone_name(self.id, value)
def _set_watering_time(self, zoneid, value):
"""Private method to set watering_time per zone."""
if value not in MANUAL_WATERING_ALLOWED:
raise ValueError(
'Valid options are: {}'.format(
', '.join(map(str, MANUAL_WATERING_ALLOWED)))
)
if isinstance(value, int) and value == 0:
value = 'OFF'
elif isinstance(value, str):
value = value.upper()
if value == 'ON':
value = MAX_WATERING_MINUTES
ddata = self.preupdate()
attr = 'zone{}_select_manual_mode'.format(zoneid)
ddata[attr] = value
self.submit_action(ddata)
# TODO in a future release we should break this out. artifact of old API
@property
def watering_time(self):
"""Return watering_time from zone."""
# zone starts with index 0
index = self.id - 1
auto_watering_time =\
self._attributes['rain_delay_mode'][index]['auto_watering_time']
manual_watering_time =\
self._attributes['rain_delay_mode'][index]['manual_watering_time']
if auto_watering_time > manual_watering_time:
watering_time = auto_watering_time
else:
watering_time = manual_watering_time
return watering_time
@watering_time.setter
def watering_time(self, value):
"""Manually turn on water for X minutes."""
return self._set_watering_time(self.id, value)
@property
def droplet(self):
return None
def _set_rain_delay(self, zoneid, value):
"""Generic method to set auto_watering program."""
# current index for rain_delay starts in 0
zoneid -= 1
if isinstance(value, int):
if value > MAX_RAIN_DELAY_DAYS or value < 0:
return None
elif value == 0:
value = 'off'
elif value == 1:
value = '1day'
elif value >= 2:
value = str(value) + 'days'
elif isinstance(value, str):
if value.lower() != 'off':
return None
ddata = self.preupdate()
attr = 'zone{}_rain_delay_select'.format(zoneid)
ddata[attr] = value
self.submit_action(ddata)
return True
@property
def rain_delay(self):
"""Return the rain delay day from zone."""
index = self.id - 1
return self._attributes['rain_delay_mode'][index]['rain_delay_mode']
@rain_delay.setter
def rain_delay(self, value):
"""Set number of rain delay days for zone."""
return self._set_rain_delay(self.id, value)
@property
def next_cycle(self):
"""Return the time scheduled for next watering from zone."""
index = self.id - 1
return self._attributes['rain_delay_mode'][index]['next_water_cycle']
def _set_auto_watering(self, zoneid, value):
"""Private method to set auto_watering program."""
if not isinstance(value, bool):
return None
ddata = self.preupdate()
attr = 'zone{}_program_toggle'.format(zoneid)
try:
if not value:
ddata.pop(attr)
else:
ddata[attr] = 'on'
except KeyError:
pass
self.submit_action(ddata)
return True
@property
def auto_watering(self):
"""Return if zone is configured to automatic watering."""
value = "zone{}".format(self.id)
return find_program_status(self._parent.html['home'], value)
@auto_watering.setter
def auto_watering(self, value):
"""Enable/disable zone auto_watering program."""
return self._set_auto_watering(self.id, bool(value))
@property
def is_watering(self):
"""Return boolean if zone is watering."""
return bool(self.watering_time > 0)
def report(self):
"""Return status from zone."""
return self._to_dict()
def preupdate(self, force_refresh=True):
"""Return a dict with all current options prior submitting request."""
ddata = MANUAL_OP_DATA.copy()
# force update to make sure status is accurate
if force_refresh:
self.update()
# select current controller and faucet
ddata['select_controller'] = \
self._parent.controllers.index(self._controller)
ddata['select_faucet'] = \
self._controller.faucets.index(self._faucet)
# check if zone is scheduled automatically (zone1_program_toggle)
# only add zoneX_program_toogle to ddata when needed,
# otherwise the field will be always on
for zone in self._faucet.zones:
attr = 'zone{}_program_toggle'.format(zone.id)
if zone.auto_watering:
ddata[attr] = 'on'
# check if zone current watering manually (zone1_select_manual_mode)
for zone in self._faucet.zones:
attr = 'zone{}_select_manual_mode'.format(zone.id)
if zone.watering_time and attr in ddata.keys():
ddata[attr] = zone.watering_time
# check if rain delay is selected (zone0_rain_delay_select)
for zone in self._faucet.zones:
attr = 'zone{}_rain_delay_select'.format(zone.id - 1)
value = zone.rain_delay
if value and attr in ddata.keys():
if int(value) >= 2 and int(value) <= 7:
value = str(value) + 'days'
else:
value = str(value) + 'day'
ddata[attr] = value
return ddata
def submit_action(self, ddata):
"""Post data."""
self._controller.post(ddata,
url=HOME_ENDPOINT,
referer=HOME_ENDPOINT)
|
tchellomello/raincloudy | raincloudy/faucet.py | RainCloudyFaucetZone.preupdate | python | def preupdate(self, force_refresh=True):
ddata = MANUAL_OP_DATA.copy()
# force update to make sure status is accurate
if force_refresh:
self.update()
# select current controller and faucet
ddata['select_controller'] = \
self._parent.controllers.index(self._controller)
ddata['select_faucet'] = \
self._controller.faucets.index(self._faucet)
# check if zone is scheduled automatically (zone1_program_toggle)
# only add zoneX_program_toogle to ddata when needed,
# otherwise the field will be always on
for zone in self._faucet.zones:
attr = 'zone{}_program_toggle'.format(zone.id)
if zone.auto_watering:
ddata[attr] = 'on'
# check if zone current watering manually (zone1_select_manual_mode)
for zone in self._faucet.zones:
attr = 'zone{}_select_manual_mode'.format(zone.id)
if zone.watering_time and attr in ddata.keys():
ddata[attr] = zone.watering_time
# check if rain delay is selected (zone0_rain_delay_select)
for zone in self._faucet.zones:
attr = 'zone{}_rain_delay_select'.format(zone.id - 1)
value = zone.rain_delay
if value and attr in ddata.keys():
if int(value) >= 2 and int(value) <= 7:
value = str(value) + 'days'
else:
value = str(value) + 'day'
ddata[attr] = value
return ddata | Return a dict with all current options prior submitting request. | train | https://github.com/tchellomello/raincloudy/blob/1847fa913e5ba79645d51bf23637860d68c67dbf/raincloudy/faucet.py#L347-L386 | [
"def update(self):\n \"\"\"Callback self._controller.update().\"\"\"\n self._controller.update()\n"
] | class RainCloudyFaucetZone(RainCloudyFaucetCore):
"""RainCloudyFaucetZone object."""
# pylint: disable=super-init-not-called
# needs review later
def __init__(self, parent, controller, faucet, zone_id):
"""
Initialize RainCloudy Controller object.
:param parent: RainCloudy object
:param controller: RainCloudy Controller parent object
:param faucet: faucet assigned controller
:param zone_id: zone ID assigned controller
:type parent: RainCloudy object
:type controller: RainCloudyControler object
:type faucet: RainCloudyFaucet object
:type zone_id: integer
:return: RainCloudyFaucet object
:rtype: RainCloudyFaucet object
"""
self._parent = parent
self._controller = controller
self._faucet = faucet
self._id = zone_id
def __repr__(self):
"""Object representation."""
try:
return "<{0}: {1}>".format(self.__class__.__name__, self.name)
except AttributeError:
return "<{0}: {1}>".format(self.__class__.__name__, self.id)
def _set_zone_name(self, zoneid, name):
"""Private method to override zone name."""
# zone starts with index 0
zoneid -= 1
data = {
'_set_zone_name': 'Set Name',
'select_zone': str(zoneid),
'zone_name': name,
}
self._controller.post(data)
@property
def name(self):
"""Return zone name."""
return find_zone_name(self._parent.html['home'], self.id)
@name.setter
def name(self, value):
"""Set a new zone name to faucet."""
self._set_zone_name(self.id, value)
def _set_watering_time(self, zoneid, value):
"""Private method to set watering_time per zone."""
if value not in MANUAL_WATERING_ALLOWED:
raise ValueError(
'Valid options are: {}'.format(
', '.join(map(str, MANUAL_WATERING_ALLOWED)))
)
if isinstance(value, int) and value == 0:
value = 'OFF'
elif isinstance(value, str):
value = value.upper()
if value == 'ON':
value = MAX_WATERING_MINUTES
ddata = self.preupdate()
attr = 'zone{}_select_manual_mode'.format(zoneid)
ddata[attr] = value
self.submit_action(ddata)
# TODO in a future release we should break this out. artifact of old API
@property
def watering_time(self):
"""Return watering_time from zone."""
# zone starts with index 0
index = self.id - 1
auto_watering_time =\
self._attributes['rain_delay_mode'][index]['auto_watering_time']
manual_watering_time =\
self._attributes['rain_delay_mode'][index]['manual_watering_time']
if auto_watering_time > manual_watering_time:
watering_time = auto_watering_time
else:
watering_time = manual_watering_time
return watering_time
@watering_time.setter
def watering_time(self, value):
"""Manually turn on water for X minutes."""
return self._set_watering_time(self.id, value)
@property
def droplet(self):
return None
def _set_rain_delay(self, zoneid, value):
"""Generic method to set auto_watering program."""
# current index for rain_delay starts in 0
zoneid -= 1
if isinstance(value, int):
if value > MAX_RAIN_DELAY_DAYS or value < 0:
return None
elif value == 0:
value = 'off'
elif value == 1:
value = '1day'
elif value >= 2:
value = str(value) + 'days'
elif isinstance(value, str):
if value.lower() != 'off':
return None
ddata = self.preupdate()
attr = 'zone{}_rain_delay_select'.format(zoneid)
ddata[attr] = value
self.submit_action(ddata)
return True
@property
def rain_delay(self):
"""Return the rain delay day from zone."""
index = self.id - 1
return self._attributes['rain_delay_mode'][index]['rain_delay_mode']
@rain_delay.setter
def rain_delay(self, value):
"""Set number of rain delay days for zone."""
return self._set_rain_delay(self.id, value)
@property
def next_cycle(self):
"""Return the time scheduled for next watering from zone."""
index = self.id - 1
return self._attributes['rain_delay_mode'][index]['next_water_cycle']
def _set_auto_watering(self, zoneid, value):
"""Private method to set auto_watering program."""
if not isinstance(value, bool):
return None
ddata = self.preupdate()
attr = 'zone{}_program_toggle'.format(zoneid)
try:
if not value:
ddata.pop(attr)
else:
ddata[attr] = 'on'
except KeyError:
pass
self.submit_action(ddata)
return True
@property
def auto_watering(self):
"""Return if zone is configured to automatic watering."""
value = "zone{}".format(self.id)
return find_program_status(self._parent.html['home'], value)
@auto_watering.setter
def auto_watering(self, value):
"""Enable/disable zone auto_watering program."""
return self._set_auto_watering(self.id, bool(value))
@property
def is_watering(self):
"""Return boolean if zone is watering."""
return bool(self.watering_time > 0)
def _to_dict(self):
"""Method to build zone dict."""
return {
'auto_watering':
getattr(self, "auto_watering"),
'droplet':
getattr(self, "droplet"),
'is_watering':
getattr(self, "is_watering"),
'name':
getattr(self, "name"),
'next_cycle':
getattr(self, "next_cycle"),
'rain_delay':
getattr(self, "rain_delay"),
'watering_time':
getattr(self, "watering_time"),
}
def report(self):
"""Return status from zone."""
return self._to_dict()
def submit_action(self, ddata):
"""Post data."""
self._controller.post(ddata,
url=HOME_ENDPOINT,
referer=HOME_ENDPOINT)
|
tchellomello/raincloudy | raincloudy/faucet.py | RainCloudyFaucetZone.submit_action | python | def submit_action(self, ddata):
self._controller.post(ddata,
url=HOME_ENDPOINT,
referer=HOME_ENDPOINT) | Post data. | train | https://github.com/tchellomello/raincloudy/blob/1847fa913e5ba79645d51bf23637860d68c67dbf/raincloudy/faucet.py#L388-L392 | null | class RainCloudyFaucetZone(RainCloudyFaucetCore):
"""RainCloudyFaucetZone object."""
# pylint: disable=super-init-not-called
# needs review later
def __init__(self, parent, controller, faucet, zone_id):
"""
Initialize RainCloudy Controller object.
:param parent: RainCloudy object
:param controller: RainCloudy Controller parent object
:param faucet: faucet assigned controller
:param zone_id: zone ID assigned controller
:type parent: RainCloudy object
:type controller: RainCloudyControler object
:type faucet: RainCloudyFaucet object
:type zone_id: integer
:return: RainCloudyFaucet object
:rtype: RainCloudyFaucet object
"""
self._parent = parent
self._controller = controller
self._faucet = faucet
self._id = zone_id
def __repr__(self):
"""Object representation."""
try:
return "<{0}: {1}>".format(self.__class__.__name__, self.name)
except AttributeError:
return "<{0}: {1}>".format(self.__class__.__name__, self.id)
def _set_zone_name(self, zoneid, name):
"""Private method to override zone name."""
# zone starts with index 0
zoneid -= 1
data = {
'_set_zone_name': 'Set Name',
'select_zone': str(zoneid),
'zone_name': name,
}
self._controller.post(data)
@property
def name(self):
"""Return zone name."""
return find_zone_name(self._parent.html['home'], self.id)
@name.setter
def name(self, value):
"""Set a new zone name to faucet."""
self._set_zone_name(self.id, value)
def _set_watering_time(self, zoneid, value):
"""Private method to set watering_time per zone."""
if value not in MANUAL_WATERING_ALLOWED:
raise ValueError(
'Valid options are: {}'.format(
', '.join(map(str, MANUAL_WATERING_ALLOWED)))
)
if isinstance(value, int) and value == 0:
value = 'OFF'
elif isinstance(value, str):
value = value.upper()
if value == 'ON':
value = MAX_WATERING_MINUTES
ddata = self.preupdate()
attr = 'zone{}_select_manual_mode'.format(zoneid)
ddata[attr] = value
self.submit_action(ddata)
# TODO in a future release we should break this out. artifact of old API
@property
def watering_time(self):
"""Return watering_time from zone."""
# zone starts with index 0
index = self.id - 1
auto_watering_time =\
self._attributes['rain_delay_mode'][index]['auto_watering_time']
manual_watering_time =\
self._attributes['rain_delay_mode'][index]['manual_watering_time']
if auto_watering_time > manual_watering_time:
watering_time = auto_watering_time
else:
watering_time = manual_watering_time
return watering_time
@watering_time.setter
def watering_time(self, value):
"""Manually turn on water for X minutes."""
return self._set_watering_time(self.id, value)
@property
def droplet(self):
return None
def _set_rain_delay(self, zoneid, value):
"""Generic method to set auto_watering program."""
# current index for rain_delay starts in 0
zoneid -= 1
if isinstance(value, int):
if value > MAX_RAIN_DELAY_DAYS or value < 0:
return None
elif value == 0:
value = 'off'
elif value == 1:
value = '1day'
elif value >= 2:
value = str(value) + 'days'
elif isinstance(value, str):
if value.lower() != 'off':
return None
ddata = self.preupdate()
attr = 'zone{}_rain_delay_select'.format(zoneid)
ddata[attr] = value
self.submit_action(ddata)
return True
@property
def rain_delay(self):
"""Return the rain delay day from zone."""
index = self.id - 1
return self._attributes['rain_delay_mode'][index]['rain_delay_mode']
@rain_delay.setter
def rain_delay(self, value):
"""Set number of rain delay days for zone."""
return self._set_rain_delay(self.id, value)
@property
def next_cycle(self):
"""Return the time scheduled for next watering from zone."""
index = self.id - 1
return self._attributes['rain_delay_mode'][index]['next_water_cycle']
def _set_auto_watering(self, zoneid, value):
"""Private method to set auto_watering program."""
if not isinstance(value, bool):
return None
ddata = self.preupdate()
attr = 'zone{}_program_toggle'.format(zoneid)
try:
if not value:
ddata.pop(attr)
else:
ddata[attr] = 'on'
except KeyError:
pass
self.submit_action(ddata)
return True
@property
def auto_watering(self):
"""Return if zone is configured to automatic watering."""
value = "zone{}".format(self.id)
return find_program_status(self._parent.html['home'], value)
@auto_watering.setter
def auto_watering(self, value):
"""Enable/disable zone auto_watering program."""
return self._set_auto_watering(self.id, bool(value))
@property
def is_watering(self):
"""Return boolean if zone is watering."""
return bool(self.watering_time > 0)
def _to_dict(self):
"""Method to build zone dict."""
return {
'auto_watering':
getattr(self, "auto_watering"),
'droplet':
getattr(self, "droplet"),
'is_watering':
getattr(self, "is_watering"),
'name':
getattr(self, "name"),
'next_cycle':
getattr(self, "next_cycle"),
'rain_delay':
getattr(self, "rain_delay"),
'watering_time':
getattr(self, "watering_time"),
}
def report(self):
"""Return status from zone."""
return self._to_dict()
def preupdate(self, force_refresh=True):
"""Return a dict with all current options prior submitting request."""
ddata = MANUAL_OP_DATA.copy()
# force update to make sure status is accurate
if force_refresh:
self.update()
# select current controller and faucet
ddata['select_controller'] = \
self._parent.controllers.index(self._controller)
ddata['select_faucet'] = \
self._controller.faucets.index(self._faucet)
# check if zone is scheduled automatically (zone1_program_toggle)
# only add zoneX_program_toogle to ddata when needed,
# otherwise the field will be always on
for zone in self._faucet.zones:
attr = 'zone{}_program_toggle'.format(zone.id)
if zone.auto_watering:
ddata[attr] = 'on'
# check if zone current watering manually (zone1_select_manual_mode)
for zone in self._faucet.zones:
attr = 'zone{}_select_manual_mode'.format(zone.id)
if zone.watering_time and attr in ddata.keys():
ddata[attr] = zone.watering_time
# check if rain delay is selected (zone0_rain_delay_select)
for zone in self._faucet.zones:
attr = 'zone{}_rain_delay_select'.format(zone.id - 1)
value = zone.rain_delay
if value and attr in ddata.keys():
if int(value) >= 2 and int(value) <= 7:
value = str(value) + 'days'
else:
value = str(value) + 'day'
ddata[attr] = value
return ddata
|
tchellomello/raincloudy | raincloudy/core.py | RainCloudy._authenticate | python | def _authenticate(self):
# to obtain csrftoken, remove Referer from headers
headers = HEADERS.copy()
headers.pop('Referer')
# initial GET request
self.client = requests.Session()
self.client.proxies = self._proxies
self.client.verify = self._ssl_verify
self.client.stream = True
self.client.get(LOGIN_ENDPOINT, headers=headers)
# set headers to submit POST request
token = INITIAL_DATA.copy()
token['csrfmiddlewaretoken'] = self.csrftoken
token['email'] = self._username
token['password'] = self._password
req = self.client.post(LOGIN_ENDPOINT, data=token, headers=HEADERS)
if req.status_code != 302:
req.raise_for_status()
setup = self.client.get(SETUP_ENDPOINT, headers=HEADERS)
# populate device list
self.html['setup'] = generate_soup_html(setup.text)
# currently only one faucet is supported on the code
# we have future plans to support it
parsed_controller = serial_finder(self.html['setup'])
self.controllers.append(
RainCloudyController(
self,
parsed_controller['controller_serial'],
parsed_controller['faucet_serial']
)
)
self.is_connected = True
return True | Authenticate. | train | https://github.com/tchellomello/raincloudy/blob/1847fa913e5ba79645d51bf23637860d68c67dbf/raincloudy/core.py#L70-L109 | [
"def generate_soup_html(data):\n \"\"\"Return an BeautifulSoup HTML parser document.\"\"\"\n try:\n return BeautifulSoup(data, 'html.parser')\n except:\n raise\n",
"def serial_finder(data):\n \"\"\"\n Find controller serial and faucet_serial from the setup page.\n\n <select id=\"id... | class RainCloudy(object):
"""RainCloudy object."""
def __init__(self, username, password, http_proxy=None, https_proxy=None,
ssl_warnings=True, ssl_verify=True):
"""
Initialize RainCloud object.
:param username: username to authenticate user
:param passwrod: password to authenticate user
:param http_proxy: HTTP proxy information (127.0.0.1:8080)
:param https_proxy: HTTPs proxy information (127.0.0.1:8080)
:param ssl_warnings: Show SSL warnings
:param ssl_verify: Verify SSL server certificate
:type username: string
:type password: string
:type http_proxy: string
:type https_proxy: string
:type ssl_warnings: boolean
:type ssl_verify: boolean
:rtype: RainCloudy object
"""
self._ssl_verify = ssl_verify
if not ssl_warnings:
urllib3.disable_warnings()
# define credentials
self._username = username
self._password = password
# initialize future attributes
self.controllers = []
self.client = None
self.is_connected = False
self.html = {
'home': None,
'setup': None,
'program': None,
'manage': None,
}
# set proxy environment
self._proxies = {
"http": http_proxy,
"https": https_proxy,
}
# login
self.login()
def __repr__(self):
"""Object representation."""
return "<{0}: {1}>".format(self.__class__.__name__,
self.controller.serial)
def login(self):
"""Call login."""
self._authenticate()
@property
def csrftoken(self):
'''Return current csrftoken from request session.'''
if self.client:
return self.client.cookies.get('csrftoken')
return None
def update(self):
"""Update controller._attributes."""
self.controller.update()
@property
def controller(self):
"""Show current linked controllers."""
if hasattr(self, 'controllers'):
if len(self.controllers) > 1:
# in the future, we should support more controllers
raise TypeError("Only one controller per account.")
return self.controllers[0]
raise AttributeError("There is no controller assigned.")
def logout(self):
"""Logout."""
self.client.get(LOGOUT_ENDPOINT)
self._cleanup()
def _cleanup(self):
"""Cleanup object when logging out."""
self.client = None
self.controllers = []
self.is_connected = False
|
tchellomello/raincloudy | raincloudy/core.py | RainCloudy.controller | python | def controller(self):
if hasattr(self, 'controllers'):
if len(self.controllers) > 1:
# in the future, we should support more controllers
raise TypeError("Only one controller per account.")
return self.controllers[0]
raise AttributeError("There is no controller assigned.") | Show current linked controllers. | train | https://github.com/tchellomello/raincloudy/blob/1847fa913e5ba79645d51bf23637860d68c67dbf/raincloudy/core.py#L123-L130 | null | class RainCloudy(object):
"""RainCloudy object."""
def __init__(self, username, password, http_proxy=None, https_proxy=None,
ssl_warnings=True, ssl_verify=True):
"""
Initialize RainCloud object.
:param username: username to authenticate user
:param passwrod: password to authenticate user
:param http_proxy: HTTP proxy information (127.0.0.1:8080)
:param https_proxy: HTTPs proxy information (127.0.0.1:8080)
:param ssl_warnings: Show SSL warnings
:param ssl_verify: Verify SSL server certificate
:type username: string
:type password: string
:type http_proxy: string
:type https_proxy: string
:type ssl_warnings: boolean
:type ssl_verify: boolean
:rtype: RainCloudy object
"""
self._ssl_verify = ssl_verify
if not ssl_warnings:
urllib3.disable_warnings()
# define credentials
self._username = username
self._password = password
# initialize future attributes
self.controllers = []
self.client = None
self.is_connected = False
self.html = {
'home': None,
'setup': None,
'program': None,
'manage': None,
}
# set proxy environment
self._proxies = {
"http": http_proxy,
"https": https_proxy,
}
# login
self.login()
def __repr__(self):
"""Object representation."""
return "<{0}: {1}>".format(self.__class__.__name__,
self.controller.serial)
def login(self):
"""Call login."""
self._authenticate()
def _authenticate(self):
"""Authenticate."""
# to obtain csrftoken, remove Referer from headers
headers = HEADERS.copy()
headers.pop('Referer')
# initial GET request
self.client = requests.Session()
self.client.proxies = self._proxies
self.client.verify = self._ssl_verify
self.client.stream = True
self.client.get(LOGIN_ENDPOINT, headers=headers)
# set headers to submit POST request
token = INITIAL_DATA.copy()
token['csrfmiddlewaretoken'] = self.csrftoken
token['email'] = self._username
token['password'] = self._password
req = self.client.post(LOGIN_ENDPOINT, data=token, headers=HEADERS)
if req.status_code != 302:
req.raise_for_status()
setup = self.client.get(SETUP_ENDPOINT, headers=HEADERS)
# populate device list
self.html['setup'] = generate_soup_html(setup.text)
# currently only one faucet is supported on the code
# we have future plans to support it
parsed_controller = serial_finder(self.html['setup'])
self.controllers.append(
RainCloudyController(
self,
parsed_controller['controller_serial'],
parsed_controller['faucet_serial']
)
)
self.is_connected = True
return True
@property
def csrftoken(self):
'''Return current csrftoken from request session.'''
if self.client:
return self.client.cookies.get('csrftoken')
return None
def update(self):
"""Update controller._attributes."""
self.controller.update()
@property
def logout(self):
"""Logout."""
self.client.get(LOGOUT_ENDPOINT)
self._cleanup()
def _cleanup(self):
"""Cleanup object when logging out."""
self.client = None
self.controllers = []
self.is_connected = False
|
tchellomello/raincloudy | raincloudy/controller.py | RainCloudyController._assign_faucets | python | def _assign_faucets(self, faucets):
if not faucets:
raise TypeError("Controller does not have a faucet assigned.")
for faucet_id in faucets:
self.faucets.append(
RainCloudyFaucet(self._parent, self, faucet_id)) | Assign RainCloudyFaucet objects to self.faucets. | train | https://github.com/tchellomello/raincloudy/blob/1847fa913e5ba79645d51bf23637860d68c67dbf/raincloudy/controller.py#L48-L55 | null | class RainCloudyController(object):
"""RainCloudy Controller object."""
def __init__(self, parent, controller_id, faucets=None):
"""
Initialize RainCloudy Controller object.
:param parent: RainCloudy parent object
:param controller_id: Control Unit ID
:param valve_id: Value Unit ID assigned controller
:type parent: RainCloudy object
:type controller_id: string
:type valve_id: string
:return: RainCloudyController object
:rtype: RainCloudyController object
"""
self.attributes = None
self._parent = parent
self._controller_id = controller_id
self._verify_parent()
# faucets associated with controller
self.faucets = []
# load assigned faucets
self._assign_faucets(faucets)
# populate controller attributes
self.update()
def _verify_parent(self):
"""Verify parent type."""
if not isinstance(self._parent, raincloudy.core.RainCloudy):
raise TypeError("Invalid parent object.")
def __repr__(self):
"""Object representation."""
try:
return "<{0}: {1}>".format(self.__class__.__name__, self.name)
except AttributeError:
return "<{0}: {1}>".format(self.__class__.__name__, self.id)
def post(self, ddata, url=SETUP_ENDPOINT, referer=SETUP_ENDPOINT):
"""Method to update some attributes on namespace."""
headers = HEADERS.copy()
if referer is None:
headers.pop('Referer')
else:
headers['Referer'] = referer
# append csrftoken
if 'csrfmiddlewaretoken' not in ddata.keys():
ddata['csrfmiddlewaretoken'] = self._parent.csrftoken
req = self._parent.client.post(url, headers=headers, data=ddata)
if req.status_code == 200:
self.update()
def _get_cu_and_fu_status(self):
"""Submit GET request to update information."""
# adjust headers
headers = HEADERS.copy()
headers['Accept'] = '*/*'
headers['X-Requested-With'] = 'XMLHttpRequest'
headers['X-CSRFToken'] = self._parent.csrftoken
args = '?controller_serial=' + self.serial \
+ '&faucet_serial=' + self.faucet.serial
req = self._parent.client.get(STATUS_ENDPOINT + args,
headers=headers)
# token probably expired, then try again
if req.status_code == 403:
self._parent.login()
self.update()
elif req.status_code == 200:
self.attributes = req.json()
else:
req.raise_for_status()
def _refresh_html_home(self):
"""
Function to refresh the self._parent.html['home'] object
which provides the status if zones are scheduled to
start automatically (program_toggle).
"""
req = self._parent.client.get(HOME_ENDPOINT)
if req.status_code == 403:
self._parent.login()
self.update()
elif req.status_code == 200:
self._parent.html['home'] = generate_soup_html(req.text)
else:
req.raise_for_status()
def update(self):
"""
Call 2 methods to update zone attributes and html['home'] object
"""
# update zone attributes
self._get_cu_and_fu_status()
# update self._parent.html['home'] for gathering
# auto_watering status (program_toggle tag)
self._refresh_html_home()
@property
def serial(self):
"""Return controller id."""
return self._controller_id
# pylint: disable=invalid-name
@property
def id(self):
"""Return controller id."""
return self.serial
@property
def name(self):
"""Return controller name."""
return \
find_controller_or_faucet_name(self._parent.html['home'],
'controller')
@name.setter
def name(self, value):
"""Set a new name to controller."""
data = {
'_set_controller_name': 'Set Name',
'controller_name': value,
}
self.post(data, url=SETUP_ENDPOINT, referer=SETUP_ENDPOINT)
@property
def status(self):
"""Return controller status."""
return self.attributes['controller_status']
@property
def current_time(self):
"""Return controller current time."""
return self.attributes['current_time']
@property
def faucet(self):
"""Show current linked faucet."""
if hasattr(self, 'faucets'):
if len(self.faucets) > 1:
# in the future, we should support more faucets
raise TypeError("Only one faucet per account.")
return self.faucets[0]
raise AttributeError("There is no faucet assigned.")
|
tchellomello/raincloudy | raincloudy/controller.py | RainCloudyController.post | python | def post(self, ddata, url=SETUP_ENDPOINT, referer=SETUP_ENDPOINT):
headers = HEADERS.copy()
if referer is None:
headers.pop('Referer')
else:
headers['Referer'] = referer
# append csrftoken
if 'csrfmiddlewaretoken' not in ddata.keys():
ddata['csrfmiddlewaretoken'] = self._parent.csrftoken
req = self._parent.client.post(url, headers=headers, data=ddata)
if req.status_code == 200:
self.update() | Method to update some attributes on namespace. | train | https://github.com/tchellomello/raincloudy/blob/1847fa913e5ba79645d51bf23637860d68c67dbf/raincloudy/controller.py#L64-L78 | [
"def update(self):\n \"\"\"\n Call 2 methods to update zone attributes and html['home'] object\n \"\"\"\n # update zone attributes\n self._get_cu_and_fu_status()\n\n # update self._parent.html['home'] for gathering\n # auto_watering status (program_toggle tag)\n self._refresh_html_home()\n"
... | class RainCloudyController(object):
"""RainCloudy Controller object."""
def __init__(self, parent, controller_id, faucets=None):
"""
Initialize RainCloudy Controller object.
:param parent: RainCloudy parent object
:param controller_id: Control Unit ID
:param valve_id: Value Unit ID assigned controller
:type parent: RainCloudy object
:type controller_id: string
:type valve_id: string
:return: RainCloudyController object
:rtype: RainCloudyController object
"""
self.attributes = None
self._parent = parent
self._controller_id = controller_id
self._verify_parent()
# faucets associated with controller
self.faucets = []
# load assigned faucets
self._assign_faucets(faucets)
# populate controller attributes
self.update()
def _verify_parent(self):
"""Verify parent type."""
if not isinstance(self._parent, raincloudy.core.RainCloudy):
raise TypeError("Invalid parent object.")
def _assign_faucets(self, faucets):
"""Assign RainCloudyFaucet objects to self.faucets."""
if not faucets:
raise TypeError("Controller does not have a faucet assigned.")
for faucet_id in faucets:
self.faucets.append(
RainCloudyFaucet(self._parent, self, faucet_id))
def __repr__(self):
"""Object representation."""
try:
return "<{0}: {1}>".format(self.__class__.__name__, self.name)
except AttributeError:
return "<{0}: {1}>".format(self.__class__.__name__, self.id)
def _get_cu_and_fu_status(self):
"""Submit GET request to update information."""
# adjust headers
headers = HEADERS.copy()
headers['Accept'] = '*/*'
headers['X-Requested-With'] = 'XMLHttpRequest'
headers['X-CSRFToken'] = self._parent.csrftoken
args = '?controller_serial=' + self.serial \
+ '&faucet_serial=' + self.faucet.serial
req = self._parent.client.get(STATUS_ENDPOINT + args,
headers=headers)
# token probably expired, then try again
if req.status_code == 403:
self._parent.login()
self.update()
elif req.status_code == 200:
self.attributes = req.json()
else:
req.raise_for_status()
def _refresh_html_home(self):
"""
Function to refresh the self._parent.html['home'] object
which provides the status if zones are scheduled to
start automatically (program_toggle).
"""
req = self._parent.client.get(HOME_ENDPOINT)
if req.status_code == 403:
self._parent.login()
self.update()
elif req.status_code == 200:
self._parent.html['home'] = generate_soup_html(req.text)
else:
req.raise_for_status()
def update(self):
"""
Call 2 methods to update zone attributes and html['home'] object
"""
# update zone attributes
self._get_cu_and_fu_status()
# update self._parent.html['home'] for gathering
# auto_watering status (program_toggle tag)
self._refresh_html_home()
@property
def serial(self):
"""Return controller id."""
return self._controller_id
# pylint: disable=invalid-name
@property
def id(self):
"""Return controller id."""
return self.serial
@property
def name(self):
"""Return controller name."""
return \
find_controller_or_faucet_name(self._parent.html['home'],
'controller')
@name.setter
def name(self, value):
"""Set a new name to controller."""
data = {
'_set_controller_name': 'Set Name',
'controller_name': value,
}
self.post(data, url=SETUP_ENDPOINT, referer=SETUP_ENDPOINT)
@property
def status(self):
"""Return controller status."""
return self.attributes['controller_status']
@property
def current_time(self):
"""Return controller current time."""
return self.attributes['current_time']
@property
def faucet(self):
"""Show current linked faucet."""
if hasattr(self, 'faucets'):
if len(self.faucets) > 1:
# in the future, we should support more faucets
raise TypeError("Only one faucet per account.")
return self.faucets[0]
raise AttributeError("There is no faucet assigned.")
|
tchellomello/raincloudy | raincloudy/controller.py | RainCloudyController._get_cu_and_fu_status | python | def _get_cu_and_fu_status(self):
# adjust headers
headers = HEADERS.copy()
headers['Accept'] = '*/*'
headers['X-Requested-With'] = 'XMLHttpRequest'
headers['X-CSRFToken'] = self._parent.csrftoken
args = '?controller_serial=' + self.serial \
+ '&faucet_serial=' + self.faucet.serial
req = self._parent.client.get(STATUS_ENDPOINT + args,
headers=headers)
# token probably expired, then try again
if req.status_code == 403:
self._parent.login()
self.update()
elif req.status_code == 200:
self.attributes = req.json()
else:
req.raise_for_status() | Submit GET request to update information. | train | https://github.com/tchellomello/raincloudy/blob/1847fa913e5ba79645d51bf23637860d68c67dbf/raincloudy/controller.py#L80-L101 | [
"def update(self):\n \"\"\"\n Call 2 methods to update zone attributes and html['home'] object\n \"\"\"\n # update zone attributes\n self._get_cu_and_fu_status()\n\n # update self._parent.html['home'] for gathering\n # auto_watering status (program_toggle tag)\n self._refresh_html_home()\n"
... | class RainCloudyController(object):
"""RainCloudy Controller object."""
def __init__(self, parent, controller_id, faucets=None):
"""
Initialize RainCloudy Controller object.
:param parent: RainCloudy parent object
:param controller_id: Control Unit ID
:param valve_id: Value Unit ID assigned controller
:type parent: RainCloudy object
:type controller_id: string
:type valve_id: string
:return: RainCloudyController object
:rtype: RainCloudyController object
"""
self.attributes = None
self._parent = parent
self._controller_id = controller_id
self._verify_parent()
# faucets associated with controller
self.faucets = []
# load assigned faucets
self._assign_faucets(faucets)
# populate controller attributes
self.update()
def _verify_parent(self):
"""Verify parent type."""
if not isinstance(self._parent, raincloudy.core.RainCloudy):
raise TypeError("Invalid parent object.")
def _assign_faucets(self, faucets):
"""Assign RainCloudyFaucet objects to self.faucets."""
if not faucets:
raise TypeError("Controller does not have a faucet assigned.")
for faucet_id in faucets:
self.faucets.append(
RainCloudyFaucet(self._parent, self, faucet_id))
def __repr__(self):
"""Object representation."""
try:
return "<{0}: {1}>".format(self.__class__.__name__, self.name)
except AttributeError:
return "<{0}: {1}>".format(self.__class__.__name__, self.id)
def post(self, ddata, url=SETUP_ENDPOINT, referer=SETUP_ENDPOINT):
"""Method to update some attributes on namespace."""
headers = HEADERS.copy()
if referer is None:
headers.pop('Referer')
else:
headers['Referer'] = referer
# append csrftoken
if 'csrfmiddlewaretoken' not in ddata.keys():
ddata['csrfmiddlewaretoken'] = self._parent.csrftoken
req = self._parent.client.post(url, headers=headers, data=ddata)
if req.status_code == 200:
self.update()
def _refresh_html_home(self):
"""
Function to refresh the self._parent.html['home'] object
which provides the status if zones are scheduled to
start automatically (program_toggle).
"""
req = self._parent.client.get(HOME_ENDPOINT)
if req.status_code == 403:
self._parent.login()
self.update()
elif req.status_code == 200:
self._parent.html['home'] = generate_soup_html(req.text)
else:
req.raise_for_status()
def update(self):
"""
Call 2 methods to update zone attributes and html['home'] object
"""
# update zone attributes
self._get_cu_and_fu_status()
# update self._parent.html['home'] for gathering
# auto_watering status (program_toggle tag)
self._refresh_html_home()
@property
def serial(self):
"""Return controller id."""
return self._controller_id
# pylint: disable=invalid-name
@property
def id(self):
"""Return controller id."""
return self.serial
@property
def name(self):
"""Return controller name."""
return \
find_controller_or_faucet_name(self._parent.html['home'],
'controller')
@name.setter
def name(self, value):
"""Set a new name to controller."""
data = {
'_set_controller_name': 'Set Name',
'controller_name': value,
}
self.post(data, url=SETUP_ENDPOINT, referer=SETUP_ENDPOINT)
@property
def status(self):
"""Return controller status."""
return self.attributes['controller_status']
@property
def current_time(self):
"""Return controller current time."""
return self.attributes['current_time']
@property
def faucet(self):
"""Show current linked faucet."""
if hasattr(self, 'faucets'):
if len(self.faucets) > 1:
# in the future, we should support more faucets
raise TypeError("Only one faucet per account.")
return self.faucets[0]
raise AttributeError("There is no faucet assigned.")
|
tchellomello/raincloudy | raincloudy/controller.py | RainCloudyController._refresh_html_home | python | def _refresh_html_home(self):
req = self._parent.client.get(HOME_ENDPOINT)
if req.status_code == 403:
self._parent.login()
self.update()
elif req.status_code == 200:
self._parent.html['home'] = generate_soup_html(req.text)
else:
req.raise_for_status() | Function to refresh the self._parent.html['home'] object
which provides the status if zones are scheduled to
start automatically (program_toggle). | train | https://github.com/tchellomello/raincloudy/blob/1847fa913e5ba79645d51bf23637860d68c67dbf/raincloudy/controller.py#L103-L116 | [
"def generate_soup_html(data):\n \"\"\"Return an BeautifulSoup HTML parser document.\"\"\"\n try:\n return BeautifulSoup(data, 'html.parser')\n except:\n raise\n",
"def update(self):\n \"\"\"\n Call 2 methods to update zone attributes and html['home'] object\n \"\"\"\n # update ... | class RainCloudyController(object):
"""RainCloudy Controller object."""
def __init__(self, parent, controller_id, faucets=None):
"""
Initialize RainCloudy Controller object.
:param parent: RainCloudy parent object
:param controller_id: Control Unit ID
:param valve_id: Value Unit ID assigned controller
:type parent: RainCloudy object
:type controller_id: string
:type valve_id: string
:return: RainCloudyController object
:rtype: RainCloudyController object
"""
self.attributes = None
self._parent = parent
self._controller_id = controller_id
self._verify_parent()
# faucets associated with controller
self.faucets = []
# load assigned faucets
self._assign_faucets(faucets)
# populate controller attributes
self.update()
def _verify_parent(self):
"""Verify parent type."""
if not isinstance(self._parent, raincloudy.core.RainCloudy):
raise TypeError("Invalid parent object.")
def _assign_faucets(self, faucets):
"""Assign RainCloudyFaucet objects to self.faucets."""
if not faucets:
raise TypeError("Controller does not have a faucet assigned.")
for faucet_id in faucets:
self.faucets.append(
RainCloudyFaucet(self._parent, self, faucet_id))
def __repr__(self):
"""Object representation."""
try:
return "<{0}: {1}>".format(self.__class__.__name__, self.name)
except AttributeError:
return "<{0}: {1}>".format(self.__class__.__name__, self.id)
def post(self, ddata, url=SETUP_ENDPOINT, referer=SETUP_ENDPOINT):
"""Method to update some attributes on namespace."""
headers = HEADERS.copy()
if referer is None:
headers.pop('Referer')
else:
headers['Referer'] = referer
# append csrftoken
if 'csrfmiddlewaretoken' not in ddata.keys():
ddata['csrfmiddlewaretoken'] = self._parent.csrftoken
req = self._parent.client.post(url, headers=headers, data=ddata)
if req.status_code == 200:
self.update()
def _get_cu_and_fu_status(self):
"""Submit GET request to update information."""
# adjust headers
headers = HEADERS.copy()
headers['Accept'] = '*/*'
headers['X-Requested-With'] = 'XMLHttpRequest'
headers['X-CSRFToken'] = self._parent.csrftoken
args = '?controller_serial=' + self.serial \
+ '&faucet_serial=' + self.faucet.serial
req = self._parent.client.get(STATUS_ENDPOINT + args,
headers=headers)
# token probably expired, then try again
if req.status_code == 403:
self._parent.login()
self.update()
elif req.status_code == 200:
self.attributes = req.json()
else:
req.raise_for_status()
def update(self):
    """Refresh zone attributes and the cached home-page HTML."""
    self._get_cu_and_fu_status()   # zone attributes
    self._refresh_html_home()      # auto_watering (program_toggle) status
@property
def serial(self):
"""Return controller id."""
return self._controller_id
# pylint: disable=invalid-name
@property
def id(self):
"""Return controller id."""
return self.serial
@property
def name(self):
"""Return controller name."""
return \
find_controller_or_faucet_name(self._parent.html['home'],
'controller')
@name.setter
def name(self, value):
"""Set a new name to controller."""
data = {
'_set_controller_name': 'Set Name',
'controller_name': value,
}
self.post(data, url=SETUP_ENDPOINT, referer=SETUP_ENDPOINT)
@property
def status(self):
"""Return controller status."""
return self.attributes['controller_status']
@property
def current_time(self):
"""Return controller current time."""
return self.attributes['current_time']
@property
def faucet(self):
    """Return the single faucet linked to this controller.

    :raises AttributeError: when no faucet list was assigned.
    :raises TypeError: when more than one faucet exists (unsupported).
    """
    if not hasattr(self, 'faucets'):
        raise AttributeError("There is no faucet assigned.")
    if len(self.faucets) > 1:
        # in the future, we should support more faucets
        raise TypeError("Only one faucet per account.")
    return self.faucets[0]
|
def name(self, value):
    """Set a new name on the controller (``name`` property setter)."""
    payload = {
        '_set_controller_name': 'Set Name',
        'controller_name': value,
    }
    self.post(payload, url=SETUP_ENDPOINT, referer=SETUP_ENDPOINT)
"def find_controller_or_faucet_name(data, p_type):\n \"\"\"\n Find on the HTML document the controller name.\n\n # expected result\n <label for=\"select_controller\">\n <span class=\"more_info\" id=\"#styling-type-light\" data-hasqtip=\"26\" \\\n title=\"Select Control Unit to display.\" >Contro... | class RainCloudyController(object):
"""RainCloudy Controller object."""
def __init__(self, parent, controller_id, faucets=None):
"""
Initialize RainCloudy Controller object.
:param parent: RainCloudy parent object
:param controller_id: Control Unit ID
:param valve_id: Value Unit ID assigned controller
:type parent: RainCloudy object
:type controller_id: string
:type valve_id: string
:return: RainCloudyController object
:rtype: RainCloudyController object
"""
self.attributes = None
self._parent = parent
self._controller_id = controller_id
self._verify_parent()
# faucets associated with controller
self.faucets = []
# load assigned faucets
self._assign_faucets(faucets)
# populate controller attributes
self.update()
def _verify_parent(self):
"""Verify parent type."""
if not isinstance(self._parent, raincloudy.core.RainCloudy):
raise TypeError("Invalid parent object.")
def _assign_faucets(self, faucets):
"""Assign RainCloudyFaucet objects to self.faucets."""
if not faucets:
raise TypeError("Controller does not have a faucet assigned.")
for faucet_id in faucets:
self.faucets.append(
RainCloudyFaucet(self._parent, self, faucet_id))
def __repr__(self):
"""Object representation."""
try:
return "<{0}: {1}>".format(self.__class__.__name__, self.name)
except AttributeError:
return "<{0}: {1}>".format(self.__class__.__name__, self.id)
def post(self, ddata, url=SETUP_ENDPOINT, referer=SETUP_ENDPOINT):
"""Method to update some attributes on namespace."""
headers = HEADERS.copy()
if referer is None:
headers.pop('Referer')
else:
headers['Referer'] = referer
# append csrftoken
if 'csrfmiddlewaretoken' not in ddata.keys():
ddata['csrfmiddlewaretoken'] = self._parent.csrftoken
req = self._parent.client.post(url, headers=headers, data=ddata)
if req.status_code == 200:
self.update()
def _get_cu_and_fu_status(self):
"""Submit GET request to update information."""
# adjust headers
headers = HEADERS.copy()
headers['Accept'] = '*/*'
headers['X-Requested-With'] = 'XMLHttpRequest'
headers['X-CSRFToken'] = self._parent.csrftoken
args = '?controller_serial=' + self.serial \
+ '&faucet_serial=' + self.faucet.serial
req = self._parent.client.get(STATUS_ENDPOINT + args,
headers=headers)
# token probably expired, then try again
if req.status_code == 403:
self._parent.login()
self.update()
elif req.status_code == 200:
self.attributes = req.json()
else:
req.raise_for_status()
def _refresh_html_home(self):
"""
Function to refresh the self._parent.html['home'] object
which provides the status if zones are scheduled to
start automatically (program_toggle).
"""
req = self._parent.client.get(HOME_ENDPOINT)
if req.status_code == 403:
self._parent.login()
self.update()
elif req.status_code == 200:
self._parent.html['home'] = generate_soup_html(req.text)
else:
req.raise_for_status()
def update(self):
"""
Call 2 methods to update zone attributes and html['home'] object
"""
# update zone attributes
self._get_cu_and_fu_status()
# update self._parent.html['home'] for gathering
# auto_watering status (program_toggle tag)
self._refresh_html_home()
@property
def serial(self):
"""Return controller id."""
return self._controller_id
# pylint: disable=invalid-name
@property
def id(self):
"""Return controller id."""
return self.serial
@property
def name(self):
"""Return controller name."""
return \
find_controller_or_faucet_name(self._parent.html['home'],
'controller')
@name.setter
@property
def status(self):
"""Return controller status."""
return self.attributes['controller_status']
@property
def current_time(self):
"""Return controller current time."""
return self.attributes['current_time']
@property
def faucet(self):
"""Show current linked faucet."""
if hasattr(self, 'faucets'):
if len(self.faucets) > 1:
# in the future, we should support more faucets
raise TypeError("Only one faucet per account.")
return self.faucets[0]
raise AttributeError("There is no faucet assigned.")
|
tchellomello/raincloudy | raincloudy/controller.py | RainCloudyController.faucet | python | def faucet(self):
if hasattr(self, 'faucets'):
if len(self.faucets) > 1:
# in the future, we should support more faucets
raise TypeError("Only one faucet per account.")
return self.faucets[0]
raise AttributeError("There is no faucet assigned.") | Show current linked faucet. | train | https://github.com/tchellomello/raincloudy/blob/1847fa913e5ba79645d51bf23637860d68c67dbf/raincloudy/controller.py#L167-L174 | null | class RainCloudyController(object):
"""RainCloudy Controller object."""
def __init__(self, parent, controller_id, faucets=None):
"""
Initialize RainCloudy Controller object.
:param parent: RainCloudy parent object
:param controller_id: Control Unit ID
:param valve_id: Value Unit ID assigned controller
:type parent: RainCloudy object
:type controller_id: string
:type valve_id: string
:return: RainCloudyController object
:rtype: RainCloudyController object
"""
self.attributes = None
self._parent = parent
self._controller_id = controller_id
self._verify_parent()
# faucets associated with controller
self.faucets = []
# load assigned faucets
self._assign_faucets(faucets)
# populate controller attributes
self.update()
def _verify_parent(self):
"""Verify parent type."""
if not isinstance(self._parent, raincloudy.core.RainCloudy):
raise TypeError("Invalid parent object.")
def _assign_faucets(self, faucets):
"""Assign RainCloudyFaucet objects to self.faucets."""
if not faucets:
raise TypeError("Controller does not have a faucet assigned.")
for faucet_id in faucets:
self.faucets.append(
RainCloudyFaucet(self._parent, self, faucet_id))
def __repr__(self):
"""Object representation."""
try:
return "<{0}: {1}>".format(self.__class__.__name__, self.name)
except AttributeError:
return "<{0}: {1}>".format(self.__class__.__name__, self.id)
def post(self, ddata, url=SETUP_ENDPOINT, referer=SETUP_ENDPOINT):
"""Method to update some attributes on namespace."""
headers = HEADERS.copy()
if referer is None:
headers.pop('Referer')
else:
headers['Referer'] = referer
# append csrftoken
if 'csrfmiddlewaretoken' not in ddata.keys():
ddata['csrfmiddlewaretoken'] = self._parent.csrftoken
req = self._parent.client.post(url, headers=headers, data=ddata)
if req.status_code == 200:
self.update()
def _get_cu_and_fu_status(self):
"""Submit GET request to update information."""
# adjust headers
headers = HEADERS.copy()
headers['Accept'] = '*/*'
headers['X-Requested-With'] = 'XMLHttpRequest'
headers['X-CSRFToken'] = self._parent.csrftoken
args = '?controller_serial=' + self.serial \
+ '&faucet_serial=' + self.faucet.serial
req = self._parent.client.get(STATUS_ENDPOINT + args,
headers=headers)
# token probably expired, then try again
if req.status_code == 403:
self._parent.login()
self.update()
elif req.status_code == 200:
self.attributes = req.json()
else:
req.raise_for_status()
def _refresh_html_home(self):
"""
Function to refresh the self._parent.html['home'] object
which provides the status if zones are scheduled to
start automatically (program_toggle).
"""
req = self._parent.client.get(HOME_ENDPOINT)
if req.status_code == 403:
self._parent.login()
self.update()
elif req.status_code == 200:
self._parent.html['home'] = generate_soup_html(req.text)
else:
req.raise_for_status()
def update(self):
"""
Call 2 methods to update zone attributes and html['home'] object
"""
# update zone attributes
self._get_cu_and_fu_status()
# update self._parent.html['home'] for gathering
# auto_watering status (program_toggle tag)
self._refresh_html_home()
@property
def serial(self):
"""Return controller id."""
return self._controller_id
# pylint: disable=invalid-name
@property
def id(self):
"""Return controller id."""
return self.serial
@property
def name(self):
"""Return controller name."""
return \
find_controller_or_faucet_name(self._parent.html['home'],
'controller')
@name.setter
def name(self, value):
"""Set a new name to controller."""
data = {
'_set_controller_name': 'Set Name',
'controller_name': value,
}
self.post(data, url=SETUP_ENDPOINT, referer=SETUP_ENDPOINT)
@property
def status(self):
"""Return controller status."""
return self.attributes['controller_status']
@property
def current_time(self):
"""Return controller current time."""
return self.attributes['current_time']
@property
|
def serial_finder(data):
    """Extract controller and faucet serials from the setup-page soup.

    The setup page renders one ``<select>`` per controller and faucet,
    e.g. ``<option ...>1 - Controller001</option>``; the serial is the
    text after the dash.

    :param data: parsed setup page
    :type data: BeautifulSoup
    :return: ``{'controller_serial': str, 'faucet_serial': [str]}``
        (only one faucet is supported for now, hence the list of one)
    :rtype: dict
    :raises TypeError: if *data* is not a BeautifulSoup object
    :raises RainCloudyException: when no valid unit is found
    """
    if not isinstance(data, BeautifulSoup):
        raise TypeError("Function requires BeautifulSoup HTML element.")
    try:
        controller_select = data.find_all(
            'select', {'id': 'id_select_controller2'})
        faucet_select = data.find_all(
            'select', {'id': 'id_select_faucet2'})
        controller_serial = controller_select[0].text.split('-')[1].strip()
        faucet_serial = faucet_select[0].text.split('-')[1].strip()
        return {
            'controller_serial': controller_serial,
            'faucet_serial': [faucet_serial],
        }
    except (AttributeError, IndexError, ValueError):
        raise RainCloudyException(
            'Could not find any valid controller or faucet')
<select id="id_select_controller2" name="select_controller" >
<option value='0' selected='selected'>1 - Controller001</option>
</select>
:param data: text to be parsed
:type data: BeautilSoup object
:return: a dict with controller_serial and faucet_serial
:rtype: dict
:raises IndexError: if controller_serial was not found on the data | train | https://github.com/tchellomello/raincloudy/blob/1847fa913e5ba79645d51bf23637860d68c67dbf/raincloudy/helpers.py#L15-L54 | null | # -*- coding: utf-8 -*-
"""Raincloudy helpers."""
from bs4 import BeautifulSoup
from raincloudy.exceptions import RainCloudyException
def generate_soup_html(data):
    """Return a BeautifulSoup document parsed with ``html.parser``.

    Parser errors propagate unchanged; the previous bare
    ``try/except: raise`` wrapper was a no-op and has been removed.
    """
    return BeautifulSoup(data, 'html.parser')
def find_program_status(data, zone):
    """Return True if *zone* has the auto-schedule program enabled.

    The home page marks an enabled zone with a checked checkbox::

        <input checked="checked" class="switch"
               id="id_zone2_program_toggle" ... type="checkbox"/>

    and a disabled zone with the same tag minus ``checked``.

    :param data: parsed home page
    :type data: BeautifulSoup
    :param zone: zone name, e.g. ``zone1``
    :return: True if the zone's program toggle is checked
    :rtype: bool
    :raises TypeError: if *data* is not a BeautifulSoup object
    :raises RainCloudyException: if the toggle cannot be found
    """
    if not isinstance(data, BeautifulSoup):
        # typo fix: message previously read "BeautilSoup"
        raise TypeError("Function requires BeautifulSoup HTML element.")
    toggle_id = 'id_{0}_program_toggle'.format(zone)
    try:
        for node in data.find_all('input', {'class': 'switch'}):
            if node.get('type') == 'checkbox' and \
                    node.get('id') == toggle_id:
                # has_attr already returns a bool; no bool() wrapper needed
                return node.has_attr('checked')
        raise IndexError
    except (AttributeError, IndexError, ValueError):
        raise RainCloudyException(
            'Could not find any valid controller or faucet')
def find_controller_or_faucet_name(data, p_type):
    """Return the displayed name of the controller or faucet, or None.

    Reads the selected option of the ``id_select_<p_type>`` dropdown on
    the home page, e.g.::

        <select class="simpleselect" id="id_select_controller" ...>
            <option value="0" selected="selected">NAME_HERE

    :param data: parsed home page
    :type data: BeautifulSoup
    :param p_type: either ``'controller'`` or ``'faucet'``
    :return: the unit name, or None when the dropdown is missing
    :rtype: str or None
    :raises TypeError: on a wrong data type or an unknown *p_type*
    """
    if not isinstance(data, BeautifulSoup):
        # typo fix: message previously read "BeautilSoup"
        raise TypeError("Function requires BeautifulSoup HTML element.")
    if p_type not in ('controller', 'faucet'):
        raise TypeError("Function p_type must be controller or faucet")
    try:
        child = data.find('select', {'id': 'id_select_{0}'.format(p_type)})
        return child.get_text().strip()
    except AttributeError:
        # dropdown not present on the page
        return None
def find_zone_name(data, zone_id):
    """Return the user-visible name of a zone, or None when not found.

    The zone table renders entries such as
    ``<span class="more_info" ...>1 - zone1</span>``; the name is the
    text after the ``"<id> - "`` prefix (first 4 characters).

    :param data: parsed home page
    :type data: BeautifulSoup
    :param zone_id: numeric zone id the row text must start with
    :return: zone name, or None when no row matches
    :rtype: str or None
    :raises TypeError: if *data* is not a BeautifulSoup object
    """
    if not isinstance(data, BeautifulSoup):
        # typo fix: message previously read "BeautilSoup"
        raise TypeError("Function requires BeautifulSoup HTML element.")
    table = data.find('table', {'class': 'zone_table'})
    rows = table.find('tbody').find_all('span', {'class': 'more_info'})
    for row in rows:
        text = row.get_text()  # fetch once instead of twice per row
        if text.startswith(str(zone_id)):
            return text[4:].strip()
    return None
# vim:sw=4:ts=4:et:
|
tchellomello/raincloudy | raincloudy/helpers.py | find_program_status | python | def find_program_status(data, zone):
if not isinstance(data, BeautifulSoup):
raise TypeError("Function requires BeautilSoup HTML element.")
try:
child = data.find_all('input', {'class': 'switch'})
zone_id = 'id_{0}_program_toggle'.format(zone)
for member in child:
if member.get('type') == 'checkbox' and \
member.get('id') == zone_id:
return bool(member.has_attr('checked'))
raise IndexError
except (AttributeError, IndexError, ValueError):
raise RainCloudyException(
'Could not find any valid controller or faucet') | Find on the HTML document if zoneX has the configuration
of the auto-schedule/program (auto_watering) enabled.
# expected result if enabled
#<input checked="checked" class="switch" id="id_zone2_program_toggle" \
name="zone2_program_toggle" onchange="submit()" type="checkbox"/>
# expected result if disabled
#<input class="switch" id="id_zone1_program_toggle" \
name="zone1_program_toggle" onchange="submit()" type="checkbox"/>
:param data: BeautifulSoup object
:param zone: zone name from class='switch'
:return: boolean if zone has program enabled
:rtype: boolean
:raises TypeError: if data is not a BeautifulSoup object
:raises IndexError: if object not found | train | https://github.com/tchellomello/raincloudy/blob/1847fa913e5ba79645d51bf23637860d68c67dbf/raincloudy/helpers.py#L57-L90 | null | # -*- coding: utf-8 -*-
"""Raincloudy helpers."""
from bs4 import BeautifulSoup
from raincloudy.exceptions import RainCloudyException
def generate_soup_html(data):
"""Return an BeautifulSoup HTML parser document."""
try:
return BeautifulSoup(data, 'html.parser')
except:
raise
def serial_finder(data):
"""
Find controller serial and faucet_serial from the setup page.
<select id="id_select_controller2" name="select_controller" >
<option value='0' selected='selected'>1 - Controller001</option>
</select>
:param data: text to be parsed
:type data: BeautilSoup object
:return: a dict with controller_serial and faucet_serial
:rtype: dict
:raises IndexError: if controller_serial was not found on the data
"""
if not isinstance(data, BeautifulSoup):
raise TypeError("Function requires BeautifulSoup HTML element.")
try:
# The setup page contains a select box for each controller and each
# faucet
controllersElement = data.find_all('select',
{'id': 'id_select_controller2'})
faucetsElement = data.find_all('select',
{'id': 'id_select_faucet2'})
controllerSerial = controllersElement[0].text.split('-')[1].strip()
faucetSerial = faucetsElement[0].text.split('-')[1].strip()
# currently only one faucet is supported on the code
# we have plans to support it in the future
parsed_dict = {}
parsed_dict['controller_serial'] = controllerSerial
parsed_dict['faucet_serial'] = [faucetSerial]
return parsed_dict
except (AttributeError, IndexError, ValueError):
raise RainCloudyException(
'Could not find any valid controller or faucet')
def find_controller_or_faucet_name(data, p_type):
"""
Find on the HTML document the controller name.
# expected result
<label for="select_controller">
<span class="more_info" id="#styling-type-light" data-hasqtip="26" \
title="Select Control Unit to display." >Control Unit:</span></label><br/>
<select class="simpleselect" id="id_select_controller" \
name="select_controller" onchange="submit()" >
<option value="0" selected="selected">HERE_IS_CONTROLLER_NAME
:param data: BeautifulSoup object
:param p_type: parameter type. (controller or faucet)
:return: controller or valve name
:rtype: string.
:raises TypeError: if data is not a BeautifulSoup object
:raises IndexError: return None because controller name was not found
"""
if not isinstance(data, BeautifulSoup):
raise TypeError("Function requires BeautilSoup HTML element.")
if not (p_type == 'controller' or p_type == 'faucet'):
raise TypeError("Function p_type must be controller or faucet")
try:
search_field = 'id_select_{0}'.format(p_type)
child = data.find('select', {'id': search_field})
return child.get_text().strip()
except AttributeError:
return None
def find_zone_name(data, zone_id):
"""
Find on the HTML document the zone name.
# expected result
<span class="more_info" \
title="Zone can be renamed on Setup tab">1 - zone1</span>,
:param data: BeautifulSoup object
:param zone: zone id
:return: zone name
:rtype: string
:raises TypeError: if data is not a BeautifulSoup object
:raises IndexError: return None because controller name was not found
"""
if not isinstance(data, BeautifulSoup):
raise TypeError("Function requires BeautilSoup HTML element.")
table = data.find('table', {'class': 'zone_table'})
table_body = table.find('tbody')
rows = table_body.find_all('span', {'class': 'more_info'})
for row in rows:
if row.get_text().startswith(str(zone_id)):
return row.get_text()[4:].strip()
return None
# vim:sw=4:ts=4:et:
|
tchellomello/raincloudy | raincloudy/helpers.py | find_controller_or_faucet_name | python | def find_controller_or_faucet_name(data, p_type):
if not isinstance(data, BeautifulSoup):
raise TypeError("Function requires BeautilSoup HTML element.")
if not (p_type == 'controller' or p_type == 'faucet'):
raise TypeError("Function p_type must be controller or faucet")
try:
search_field = 'id_select_{0}'.format(p_type)
child = data.find('select', {'id': search_field})
return child.get_text().strip()
except AttributeError:
return None | Find on the HTML document the controller name.
# expected result
<label for="select_controller">
<span class="more_info" id="#styling-type-light" data-hasqtip="26" \
title="Select Control Unit to display." >Control Unit:</span></label><br/>
<select class="simpleselect" id="id_select_controller" \
name="select_controller" onchange="submit()" >
<option value="0" selected="selected">HERE_IS_CONTROLLER_NAME
:param data: BeautifulSoup object
:param p_type: parameter type. (controller or faucet)
:return: controller or valve name
:rtype: string.
:raises TypeError: if data is not a BeautifulSoup object
:raises IndexError: return None because controller name was not found | train | https://github.com/tchellomello/raincloudy/blob/1847fa913e5ba79645d51bf23637860d68c67dbf/raincloudy/helpers.py#L93-L123 | null | # -*- coding: utf-8 -*-
"""Raincloudy helpers."""
from bs4 import BeautifulSoup
from raincloudy.exceptions import RainCloudyException
def generate_soup_html(data):
"""Return an BeautifulSoup HTML parser document."""
try:
return BeautifulSoup(data, 'html.parser')
except:
raise
def serial_finder(data):
"""
Find controller serial and faucet_serial from the setup page.
<select id="id_select_controller2" name="select_controller" >
<option value='0' selected='selected'>1 - Controller001</option>
</select>
:param data: text to be parsed
:type data: BeautilSoup object
:return: a dict with controller_serial and faucet_serial
:rtype: dict
:raises IndexError: if controller_serial was not found on the data
"""
if not isinstance(data, BeautifulSoup):
raise TypeError("Function requires BeautifulSoup HTML element.")
try:
# The setup page contains a select box for each controller and each
# faucet
controllersElement = data.find_all('select',
{'id': 'id_select_controller2'})
faucetsElement = data.find_all('select',
{'id': 'id_select_faucet2'})
controllerSerial = controllersElement[0].text.split('-')[1].strip()
faucetSerial = faucetsElement[0].text.split('-')[1].strip()
# currently only one faucet is supported on the code
# we have plans to support it in the future
parsed_dict = {}
parsed_dict['controller_serial'] = controllerSerial
parsed_dict['faucet_serial'] = [faucetSerial]
return parsed_dict
except (AttributeError, IndexError, ValueError):
raise RainCloudyException(
'Could not find any valid controller or faucet')
def find_program_status(data, zone):
"""
Find on the HTML document if zoneX has the configuration
of the auto-schedule/program (auto_watering) enabled.
# expected result if enabled
#<input checked="checked" class="switch" id="id_zone2_program_toggle" \
name="zone2_program_toggle" onchange="submit()" type="checkbox"/>
# expected result if disabled
#<input class="switch" id="id_zone1_program_toggle" \
name="zone1_program_toggle" onchange="submit()" type="checkbox"/>
:param data: BeautifulSoup object
:param zone: zone name from class='switch'
:return: boolean if zone has program enabled
:rtype: boolean
:raises TypeError: if data is not a BeautifulSoup object
:raises IndexError: if object not found
"""
if not isinstance(data, BeautifulSoup):
raise TypeError("Function requires BeautilSoup HTML element.")
try:
child = data.find_all('input', {'class': 'switch'})
zone_id = 'id_{0}_program_toggle'.format(zone)
for member in child:
if member.get('type') == 'checkbox' and \
member.get('id') == zone_id:
return bool(member.has_attr('checked'))
raise IndexError
except (AttributeError, IndexError, ValueError):
raise RainCloudyException(
'Could not find any valid controller or faucet')
def find_zone_name(data, zone_id):
"""
Find on the HTML document the zone name.
# expected result
<span class="more_info" \
title="Zone can be renamed on Setup tab">1 - zone1</span>,
:param data: BeautifulSoup object
:param zone: zone id
:return: zone name
:rtype: string
:raises TypeError: if data is not a BeautifulSoup object
:raises IndexError: return None because controller name was not found
"""
if not isinstance(data, BeautifulSoup):
raise TypeError("Function requires BeautilSoup HTML element.")
table = data.find('table', {'class': 'zone_table'})
table_body = table.find('tbody')
rows = table_body.find_all('span', {'class': 'more_info'})
for row in rows:
if row.get_text().startswith(str(zone_id)):
return row.get_text()[4:].strip()
return None
# vim:sw=4:ts=4:et:
|
tchellomello/raincloudy | raincloudy/helpers.py | find_zone_name | python | def find_zone_name(data, zone_id):
if not isinstance(data, BeautifulSoup):
raise TypeError("Function requires BeautilSoup HTML element.")
table = data.find('table', {'class': 'zone_table'})
table_body = table.find('tbody')
rows = table_body.find_all('span', {'class': 'more_info'})
for row in rows:
if row.get_text().startswith(str(zone_id)):
return row.get_text()[4:].strip()
return None | Find on the HTML document the zone name.
# expected result
<span class="more_info" \
title="Zone can be renamed on Setup tab">1 - zone1</span>,
:param data: BeautifulSoup object
:param zone: zone id
:return: zone name
:rtype: string
:raises TypeError: if data is not a BeautifulSoup object
:raises IndexError: return None because controller name was not found | train | https://github.com/tchellomello/raincloudy/blob/1847fa913e5ba79645d51bf23637860d68c67dbf/raincloudy/helpers.py#L126-L150 | null | # -*- coding: utf-8 -*-
"""Raincloudy helpers."""
from bs4 import BeautifulSoup
from raincloudy.exceptions import RainCloudyException
def generate_soup_html(data):
"""Return an BeautifulSoup HTML parser document."""
try:
return BeautifulSoup(data, 'html.parser')
except:
raise
def serial_finder(data):
"""
Find controller serial and faucet_serial from the setup page.
<select id="id_select_controller2" name="select_controller" >
<option value='0' selected='selected'>1 - Controller001</option>
</select>
:param data: text to be parsed
:type data: BeautilSoup object
:return: a dict with controller_serial and faucet_serial
:rtype: dict
:raises IndexError: if controller_serial was not found on the data
"""
if not isinstance(data, BeautifulSoup):
raise TypeError("Function requires BeautifulSoup HTML element.")
try:
# The setup page contains a select box for each controller and each
# faucet
controllersElement = data.find_all('select',
{'id': 'id_select_controller2'})
faucetsElement = data.find_all('select',
{'id': 'id_select_faucet2'})
controllerSerial = controllersElement[0].text.split('-')[1].strip()
faucetSerial = faucetsElement[0].text.split('-')[1].strip()
# currently only one faucet is supported on the code
# we have plans to support it in the future
parsed_dict = {}
parsed_dict['controller_serial'] = controllerSerial
parsed_dict['faucet_serial'] = [faucetSerial]
return parsed_dict
except (AttributeError, IndexError, ValueError):
raise RainCloudyException(
'Could not find any valid controller or faucet')
def find_program_status(data, zone):
"""
Find on the HTML document if zoneX has the configuration
of the auto-schedule/program (auto_watering) enabled.
# expected result if enabled
#<input checked="checked" class="switch" id="id_zone2_program_toggle" \
name="zone2_program_toggle" onchange="submit()" type="checkbox"/>
# expected result if disabled
#<input class="switch" id="id_zone1_program_toggle" \
name="zone1_program_toggle" onchange="submit()" type="checkbox"/>
:param data: BeautifulSoup object
:param zone: zone name from class='switch'
:return: boolean if zone has program enabled
:rtype: boolean
:raises TypeError: if data is not a BeautifulSoup object
:raises IndexError: if object not found
"""
if not isinstance(data, BeautifulSoup):
raise TypeError("Function requires BeautilSoup HTML element.")
try:
child = data.find_all('input', {'class': 'switch'})
zone_id = 'id_{0}_program_toggle'.format(zone)
for member in child:
if member.get('type') == 'checkbox' and \
member.get('id') == zone_id:
return bool(member.has_attr('checked'))
raise IndexError
except (AttributeError, IndexError, ValueError):
raise RainCloudyException(
'Could not find any valid controller or faucet')
def find_controller_or_faucet_name(data, p_type):
"""
Find on the HTML document the controller name.
# expected result
<label for="select_controller">
<span class="more_info" id="#styling-type-light" data-hasqtip="26" \
title="Select Control Unit to display." >Control Unit:</span></label><br/>
<select class="simpleselect" id="id_select_controller" \
name="select_controller" onchange="submit()" >
<option value="0" selected="selected">HERE_IS_CONTROLLER_NAME
:param data: BeautifulSoup object
:param p_type: parameter type. (controller or faucet)
:return: controller or valve name
:rtype: string.
:raises TypeError: if data is not a BeautifulSoup object
:raises IndexError: return None because controller name was not found
"""
if not isinstance(data, BeautifulSoup):
raise TypeError("Function requires BeautilSoup HTML element.")
if not (p_type == 'controller' or p_type == 'faucet'):
raise TypeError("Function p_type must be controller or faucet")
try:
search_field = 'id_select_{0}'.format(p_type)
child = data.find('select', {'id': search_field})
return child.get_text().strip()
except AttributeError:
return None
# vim:sw=4:ts=4:et:
|
def get_ticket(self, ticket_id):
    """Fetch the ticket for the given ticket ID."""
    payload = self._api._get('tickets/%d' % ticket_id)
    return Ticket(**payload)
def __init__(self, api):
self._api = api
def create_ticket(self, subject, **kwargs):
    """Create a ticket.

    To create a ticket with attachments, pass ``attachments`` as a
    sequence of fully qualified file paths, e.g.
    ``attachments=('/path/to/attachment1', '/path/to/attachment2')``.
    Unknown keyword arguments are forwarded verbatim to the API.
    """
    url = 'tickets'
    data = {
        'subject': subject,
        'status': kwargs.get('status', 2),      # default: Open
        'priority': kwargs.get('priority', 1),  # default: Low
    }
    data.update(kwargs)
    if 'attachments' in data:
        ticket = self._create_ticket_with_attachment(url, data)
    else:
        ticket = self._api._post(url, data=json.dumps(data))
    return Ticket(**ticket)
def _create_ticket_with_attachment(self, url, data):
    """POST a ticket as multipart form data with file attachments.

    ``data['attachments']`` is consumed (removed from *data*) and each
    path is uploaded as an ``attachments[]`` part.
    """
    attachments = data.pop('attachments')
    multipart_data = []
    for attachment in attachments:
        file_name = attachment.split("/")[-1]
        # open in binary mode: attachments may be non-text files and
        # must be uploaded byte-for-byte (text mode would decode them
        # and can raise UnicodeDecodeError on binary content)
        multipart_data.append(
            ('attachments[]', (file_name, open(attachment, 'rb'), None)))
    # NOTE(review): file handles stay open for the request's lifetime,
    # mirroring the original behavior -- the API layer does not close them
    return self._api._post(url, data=data, files=multipart_data)
def create_outbound_email(self, subject, description, email,
                          email_config_id, **kwargs):
    """Creates an outbound email"""
    data = {
        'subject': subject,
        'description': description,
        'priority': kwargs.get('priority', 1),
        'email': email,
        'email_config_id': email_config_id,
    }
    data.update(kwargs)
    ticket = self._api._post('tickets/outbound_email',
                             data=json.dumps(data))
    return Ticket(**ticket)
def update_ticket(self, ticket_id, **kwargs):
    """Updates a ticket from a given ticket ID"""
    updated = self._api._put('tickets/%d' % ticket_id,
                             data=json.dumps(kwargs))
    return Ticket(**updated)
def delete_ticket(self, ticket_id):
    """Delete the ticket for the given ticket ID"""
    self._api._delete('tickets/%d' % ticket_id)
def list_tickets(self, **kwargs):
"""List all tickets, optionally filtered by a view. Specify filters as
keyword arguments, such as:
filter_name = one of ['new_and_my_open', 'watching', 'spam', 'deleted',
None]
(defaults to 'new_and_my_open')
Passing None means that no named filter will be passed to
Freshdesk, which mimics the behavior of the 'all_tickets' filter
in v1 of the API.
Multiple filters are AND'd together.
"""
filter_name = 'new_and_my_open'
if 'filter_name' in kwargs:
filter_name = kwargs['filter_name']
del kwargs['filter_name']
url = 'tickets'
if filter_name is not None:
url += '?filter=%s&' % filter_name
else:
url += '?'
page = 1 if not 'page' in kwargs else kwargs['page']
per_page = 100 if not 'per_page' in kwargs else kwargs['per_page']
tickets = []
# Skip pagination by looping over each page and adding tickets if 'page' key is not in kwargs.
# else return the requested page and break the loop
while True:
this_page = self._api._get(url + 'page=%d&per_page=%d'
% (page, per_page), kwargs)
tickets += this_page
if len(this_page) < per_page or 'page' in kwargs:
break
page += 1
return [Ticket(**t) for t in tickets]
def list_new_and_my_open_tickets(self):
"""List all new and open tickets."""
return self.list_tickets(filter_name='new_and_my_open')
def list_watched_tickets(self):
"""List watched tickets, closed or open."""
return self.list_tickets(filter_name='watching')
def list_deleted_tickets(self):
"""Lists all deleted tickets."""
return self.list_tickets(filter_name='deleted')
|
sjkingo/python-freshdesk | freshdesk/v2/api.py | TicketAPI.create_ticket | python | def create_ticket(self, subject, **kwargs):
url = 'tickets'
status = kwargs.get('status', 2)
priority = kwargs.get('priority', 1)
data = {
'subject': subject,
'status': status,
'priority': priority,
}
data.update(kwargs)
if 'attachments' in data:
ticket = self._create_ticket_with_attachment(url, data)
return Ticket(**ticket)
ticket = self._api._post(url, data=json.dumps(data))
return Ticket(**ticket) | Creates a ticket
To create ticket with attachments,
pass a key 'attachments' with value as list of fully qualified file paths in string format.
ex: attachments = ('/path/to/attachment1', '/path/to/attachment2') | train | https://github.com/sjkingo/python-freshdesk/blob/39edca5d86e73de5619b1d082d9d8b5c0ae626c8/freshdesk/v2/api.py#L17-L39 | [
"def _create_ticket_with_attachment(self, url, data):\n attachments = data['attachments']\n del data['attachments']\n multipart_data = []\n\n for attachment in attachments:\n file_name = attachment.split(\"/\")[-1:][0]\n multipart_data.append(('attachments[]', (file_name, open(attachment),... | class TicketAPI(object):
def __init__(self, api):
self._api = api
def get_ticket(self, ticket_id):
"""Fetches the ticket for the given ticket ID"""
url = 'tickets/%d' % ticket_id
ticket = self._api._get(url)
return Ticket(**ticket)
def _create_ticket_with_attachment(self, url, data):
attachments = data['attachments']
del data['attachments']
multipart_data = []
for attachment in attachments:
file_name = attachment.split("/")[-1:][0]
multipart_data.append(('attachments[]', (file_name, open(attachment), None)))
ticket = self._api._post(url, data=data, files=multipart_data)
return ticket
def create_outbound_email(self, subject, description, email, email_config_id, **kwargs):
"""Creates an outbound email"""
url = 'tickets/outbound_email'
priority = kwargs.get('priority', 1)
data = {
'subject': subject,
'description': description,
'priority': priority,
'email': email,
'email_config_id': email_config_id,
}
data.update(kwargs)
ticket = self._api._post(url, data=json.dumps(data))
return Ticket(**ticket)
def update_ticket(self, ticket_id, **kwargs):
"""Updates a ticket from a given ticket ID"""
url = 'tickets/%d' % ticket_id
ticket = self._api._put(url, data=json.dumps(kwargs))
return Ticket(**ticket)
def delete_ticket(self, ticket_id):
"""Delete the ticket for the given ticket ID"""
url = 'tickets/%d' % ticket_id
self._api._delete(url)
def list_tickets(self, **kwargs):
"""List all tickets, optionally filtered by a view. Specify filters as
keyword arguments, such as:
filter_name = one of ['new_and_my_open', 'watching', 'spam', 'deleted',
None]
(defaults to 'new_and_my_open')
Passing None means that no named filter will be passed to
Freshdesk, which mimics the behavior of the 'all_tickets' filter
in v1 of the API.
Multiple filters are AND'd together.
"""
filter_name = 'new_and_my_open'
if 'filter_name' in kwargs:
filter_name = kwargs['filter_name']
del kwargs['filter_name']
url = 'tickets'
if filter_name is not None:
url += '?filter=%s&' % filter_name
else:
url += '?'
page = 1 if not 'page' in kwargs else kwargs['page']
per_page = 100 if not 'per_page' in kwargs else kwargs['per_page']
tickets = []
# Skip pagination by looping over each page and adding tickets if 'page' key is not in kwargs.
# else return the requested page and break the loop
while True:
this_page = self._api._get(url + 'page=%d&per_page=%d'
% (page, per_page), kwargs)
tickets += this_page
if len(this_page) < per_page or 'page' in kwargs:
break
page += 1
return [Ticket(**t) for t in tickets]
def list_new_and_my_open_tickets(self):
"""List all new and open tickets."""
return self.list_tickets(filter_name='new_and_my_open')
def list_watched_tickets(self):
"""List watched tickets, closed or open."""
return self.list_tickets(filter_name='watching')
def list_deleted_tickets(self):
"""Lists all deleted tickets."""
return self.list_tickets(filter_name='deleted')
|
sjkingo/python-freshdesk | freshdesk/v2/api.py | TicketAPI.create_outbound_email | python | def create_outbound_email(self, subject, description, email, email_config_id, **kwargs):
url = 'tickets/outbound_email'
priority = kwargs.get('priority', 1)
data = {
'subject': subject,
'description': description,
'priority': priority,
'email': email,
'email_config_id': email_config_id,
}
data.update(kwargs)
ticket = self._api._post(url, data=json.dumps(data))
return Ticket(**ticket) | Creates an outbound email | train | https://github.com/sjkingo/python-freshdesk/blob/39edca5d86e73de5619b1d082d9d8b5c0ae626c8/freshdesk/v2/api.py#L53-L66 | null | class TicketAPI(object):
def __init__(self, api):
self._api = api
def get_ticket(self, ticket_id):
"""Fetches the ticket for the given ticket ID"""
url = 'tickets/%d' % ticket_id
ticket = self._api._get(url)
return Ticket(**ticket)
def create_ticket(self, subject, **kwargs):
"""
Creates a ticket
To create ticket with attachments,
pass a key 'attachments' with value as list of fully qualified file paths in string format.
ex: attachments = ('/path/to/attachment1', '/path/to/attachment2')
"""
url = 'tickets'
status = kwargs.get('status', 2)
priority = kwargs.get('priority', 1)
data = {
'subject': subject,
'status': status,
'priority': priority,
}
data.update(kwargs)
if 'attachments' in data:
ticket = self._create_ticket_with_attachment(url, data)
return Ticket(**ticket)
ticket = self._api._post(url, data=json.dumps(data))
return Ticket(**ticket)
def _create_ticket_with_attachment(self, url, data):
attachments = data['attachments']
del data['attachments']
multipart_data = []
for attachment in attachments:
file_name = attachment.split("/")[-1:][0]
multipart_data.append(('attachments[]', (file_name, open(attachment), None)))
ticket = self._api._post(url, data=data, files=multipart_data)
return ticket
def update_ticket(self, ticket_id, **kwargs):
"""Updates a ticket from a given ticket ID"""
url = 'tickets/%d' % ticket_id
ticket = self._api._put(url, data=json.dumps(kwargs))
return Ticket(**ticket)
def delete_ticket(self, ticket_id):
"""Delete the ticket for the given ticket ID"""
url = 'tickets/%d' % ticket_id
self._api._delete(url)
def list_tickets(self, **kwargs):
"""List all tickets, optionally filtered by a view. Specify filters as
keyword arguments, such as:
filter_name = one of ['new_and_my_open', 'watching', 'spam', 'deleted',
None]
(defaults to 'new_and_my_open')
Passing None means that no named filter will be passed to
Freshdesk, which mimics the behavior of the 'all_tickets' filter
in v1 of the API.
Multiple filters are AND'd together.
"""
filter_name = 'new_and_my_open'
if 'filter_name' in kwargs:
filter_name = kwargs['filter_name']
del kwargs['filter_name']
url = 'tickets'
if filter_name is not None:
url += '?filter=%s&' % filter_name
else:
url += '?'
page = 1 if not 'page' in kwargs else kwargs['page']
per_page = 100 if not 'per_page' in kwargs else kwargs['per_page']
tickets = []
# Skip pagination by looping over each page and adding tickets if 'page' key is not in kwargs.
# else return the requested page and break the loop
while True:
this_page = self._api._get(url + 'page=%d&per_page=%d'
% (page, per_page), kwargs)
tickets += this_page
if len(this_page) < per_page or 'page' in kwargs:
break
page += 1
return [Ticket(**t) for t in tickets]
def list_new_and_my_open_tickets(self):
"""List all new and open tickets."""
return self.list_tickets(filter_name='new_and_my_open')
def list_watched_tickets(self):
"""List watched tickets, closed or open."""
return self.list_tickets(filter_name='watching')
def list_deleted_tickets(self):
"""Lists all deleted tickets."""
return self.list_tickets(filter_name='deleted')
|
sjkingo/python-freshdesk | freshdesk/v2/api.py | TicketAPI.update_ticket | python | def update_ticket(self, ticket_id, **kwargs):
url = 'tickets/%d' % ticket_id
ticket = self._api._put(url, data=json.dumps(kwargs))
return Ticket(**ticket) | Updates a ticket from a given ticket ID | train | https://github.com/sjkingo/python-freshdesk/blob/39edca5d86e73de5619b1d082d9d8b5c0ae626c8/freshdesk/v2/api.py#L68-L72 | null | class TicketAPI(object):
def __init__(self, api):
self._api = api
def get_ticket(self, ticket_id):
"""Fetches the ticket for the given ticket ID"""
url = 'tickets/%d' % ticket_id
ticket = self._api._get(url)
return Ticket(**ticket)
def create_ticket(self, subject, **kwargs):
"""
Creates a ticket
To create ticket with attachments,
pass a key 'attachments' with value as list of fully qualified file paths in string format.
ex: attachments = ('/path/to/attachment1', '/path/to/attachment2')
"""
url = 'tickets'
status = kwargs.get('status', 2)
priority = kwargs.get('priority', 1)
data = {
'subject': subject,
'status': status,
'priority': priority,
}
data.update(kwargs)
if 'attachments' in data:
ticket = self._create_ticket_with_attachment(url, data)
return Ticket(**ticket)
ticket = self._api._post(url, data=json.dumps(data))
return Ticket(**ticket)
def _create_ticket_with_attachment(self, url, data):
attachments = data['attachments']
del data['attachments']
multipart_data = []
for attachment in attachments:
file_name = attachment.split("/")[-1:][0]
multipart_data.append(('attachments[]', (file_name, open(attachment), None)))
ticket = self._api._post(url, data=data, files=multipart_data)
return ticket
def create_outbound_email(self, subject, description, email, email_config_id, **kwargs):
"""Creates an outbound email"""
url = 'tickets/outbound_email'
priority = kwargs.get('priority', 1)
data = {
'subject': subject,
'description': description,
'priority': priority,
'email': email,
'email_config_id': email_config_id,
}
data.update(kwargs)
ticket = self._api._post(url, data=json.dumps(data))
return Ticket(**ticket)
def delete_ticket(self, ticket_id):
"""Delete the ticket for the given ticket ID"""
url = 'tickets/%d' % ticket_id
self._api._delete(url)
def list_tickets(self, **kwargs):
"""List all tickets, optionally filtered by a view. Specify filters as
keyword arguments, such as:
filter_name = one of ['new_and_my_open', 'watching', 'spam', 'deleted',
None]
(defaults to 'new_and_my_open')
Passing None means that no named filter will be passed to
Freshdesk, which mimics the behavior of the 'all_tickets' filter
in v1 of the API.
Multiple filters are AND'd together.
"""
filter_name = 'new_and_my_open'
if 'filter_name' in kwargs:
filter_name = kwargs['filter_name']
del kwargs['filter_name']
url = 'tickets'
if filter_name is not None:
url += '?filter=%s&' % filter_name
else:
url += '?'
page = 1 if not 'page' in kwargs else kwargs['page']
per_page = 100 if not 'per_page' in kwargs else kwargs['per_page']
tickets = []
# Skip pagination by looping over each page and adding tickets if 'page' key is not in kwargs.
# else return the requested page and break the loop
while True:
this_page = self._api._get(url + 'page=%d&per_page=%d'
% (page, per_page), kwargs)
tickets += this_page
if len(this_page) < per_page or 'page' in kwargs:
break
page += 1
return [Ticket(**t) for t in tickets]
def list_new_and_my_open_tickets(self):
"""List all new and open tickets."""
return self.list_tickets(filter_name='new_and_my_open')
def list_watched_tickets(self):
"""List watched tickets, closed or open."""
return self.list_tickets(filter_name='watching')
def list_deleted_tickets(self):
"""Lists all deleted tickets."""
return self.list_tickets(filter_name='deleted')
|
sjkingo/python-freshdesk | freshdesk/v2/api.py | TicketAPI.list_tickets | python | def list_tickets(self, **kwargs):
filter_name = 'new_and_my_open'
if 'filter_name' in kwargs:
filter_name = kwargs['filter_name']
del kwargs['filter_name']
url = 'tickets'
if filter_name is not None:
url += '?filter=%s&' % filter_name
else:
url += '?'
page = 1 if not 'page' in kwargs else kwargs['page']
per_page = 100 if not 'per_page' in kwargs else kwargs['per_page']
tickets = []
# Skip pagination by looping over each page and adding tickets if 'page' key is not in kwargs.
# else return the requested page and break the loop
while True:
this_page = self._api._get(url + 'page=%d&per_page=%d'
% (page, per_page), kwargs)
tickets += this_page
if len(this_page) < per_page or 'page' in kwargs:
break
page += 1
return [Ticket(**t) for t in tickets] | List all tickets, optionally filtered by a view. Specify filters as
keyword arguments, such as:
filter_name = one of ['new_and_my_open', 'watching', 'spam', 'deleted',
None]
(defaults to 'new_and_my_open')
Passing None means that no named filter will be passed to
Freshdesk, which mimics the behavior of the 'all_tickets' filter
in v1 of the API.
Multiple filters are AND'd together. | train | https://github.com/sjkingo/python-freshdesk/blob/39edca5d86e73de5619b1d082d9d8b5c0ae626c8/freshdesk/v2/api.py#L79-L117 | null | class TicketAPI(object):
def __init__(self, api):
self._api = api
def get_ticket(self, ticket_id):
"""Fetches the ticket for the given ticket ID"""
url = 'tickets/%d' % ticket_id
ticket = self._api._get(url)
return Ticket(**ticket)
def create_ticket(self, subject, **kwargs):
"""
Creates a ticket
To create ticket with attachments,
pass a key 'attachments' with value as list of fully qualified file paths in string format.
ex: attachments = ('/path/to/attachment1', '/path/to/attachment2')
"""
url = 'tickets'
status = kwargs.get('status', 2)
priority = kwargs.get('priority', 1)
data = {
'subject': subject,
'status': status,
'priority': priority,
}
data.update(kwargs)
if 'attachments' in data:
ticket = self._create_ticket_with_attachment(url, data)
return Ticket(**ticket)
ticket = self._api._post(url, data=json.dumps(data))
return Ticket(**ticket)
def _create_ticket_with_attachment(self, url, data):
attachments = data['attachments']
del data['attachments']
multipart_data = []
for attachment in attachments:
file_name = attachment.split("/")[-1:][0]
multipart_data.append(('attachments[]', (file_name, open(attachment), None)))
ticket = self._api._post(url, data=data, files=multipart_data)
return ticket
def create_outbound_email(self, subject, description, email, email_config_id, **kwargs):
"""Creates an outbound email"""
url = 'tickets/outbound_email'
priority = kwargs.get('priority', 1)
data = {
'subject': subject,
'description': description,
'priority': priority,
'email': email,
'email_config_id': email_config_id,
}
data.update(kwargs)
ticket = self._api._post(url, data=json.dumps(data))
return Ticket(**ticket)
def update_ticket(self, ticket_id, **kwargs):
"""Updates a ticket from a given ticket ID"""
url = 'tickets/%d' % ticket_id
ticket = self._api._put(url, data=json.dumps(kwargs))
return Ticket(**ticket)
def delete_ticket(self, ticket_id):
"""Delete the ticket for the given ticket ID"""
url = 'tickets/%d' % ticket_id
self._api._delete(url)
def list_new_and_my_open_tickets(self):
"""List all new and open tickets."""
return self.list_tickets(filter_name='new_and_my_open')
def list_watched_tickets(self):
"""List watched tickets, closed or open."""
return self.list_tickets(filter_name='watching')
def list_deleted_tickets(self):
"""Lists all deleted tickets."""
return self.list_tickets(filter_name='deleted')
|
sjkingo/python-freshdesk | freshdesk/v2/api.py | ContactAPI.list_contacts | python | def list_contacts(self, **kwargs):
url = 'contacts?'
page = 1 if not 'page' in kwargs else kwargs['page']
per_page = 100 if not 'per_page' in kwargs else kwargs['per_page']
contacts = []
# Skip pagination by looping over each page and adding tickets if 'page' key is not in kwargs.
# else return the requested page and break the loop
while True:
this_page = self._api._get(url + 'page=%d&per_page=%d'
% (page, per_page), kwargs)
contacts += this_page
if len(this_page) < per_page or 'page' in kwargs:
break
page += 1
return [Contact(**c) for c in contacts] | List all contacts, optionally filtered by a query. Specify filters as
query keyword argument, such as:
email=abc@xyz.com,
mobile=1234567890,
phone=1234567890,
contacts can be filtered by state and company_id such as:
state=[blocked/deleted/unverified/verified]
company_id=1234
contacts updated after a timestamp can be filtered such as;
_updated_since=2018-01-19T02:00:00Z
Passing None means that no named filter will be passed to
Freshdesk, which returns list of all contacts | train | https://github.com/sjkingo/python-freshdesk/blob/39edca5d86e73de5619b1d082d9d8b5c0ae626c8/freshdesk/v2/api.py#L185-L225 | null | class ContactAPI(object):
def __init__(self, api):
self._api = api
def create_contact(self, *args, **kwargs):
"""Creates a contact"""
url = 'contacts'
data = {
'view_all_tickets': False,
'description': 'Freshdesk Contact'
}
data.update(kwargs)
return Contact(**self._api._post(url, data=json.dumps(data)))
def get_contact(self, contact_id):
url = 'contacts/%d' % contact_id
return Contact(**self._api._get(url))
def update_contact(self, contact_id, **data):
url = 'contacts/%d' % contact_id
return Contact(**self._api._put(url, data=json.dumps(data)))
def soft_delete_contact(self, contact_id):
url = 'contacts/%d' % contact_id
self._api._delete(url)
def restore_contact(self, contact_id):
url = 'contacts/%d/restore' % contact_id
self._api._put(url)
def permanently_delete_contact(self, contact_id, force=True):
url = 'contacts/%d/hard_delete?force=%r' % (contact_id, force)
self._api._delete(url)
def make_agent(self, contact_id, **kwargs):
url = 'contacts/%d/make_agent' % contact_id
data = {
'occasional': False,
'ticket_scope': 2,
}
data.update(kwargs)
contact = self._api._put(url, data=json.dumps(data))
return self._api.agents.get_agent(contact['agent']['id'])
|
sjkingo/python-freshdesk | freshdesk/v2/api.py | ContactAPI.create_contact | python | def create_contact(self, *args, **kwargs):
url = 'contacts'
data = {
'view_all_tickets': False,
'description': 'Freshdesk Contact'
}
data.update(kwargs)
return Contact(**self._api._post(url, data=json.dumps(data))) | Creates a contact | train | https://github.com/sjkingo/python-freshdesk/blob/39edca5d86e73de5619b1d082d9d8b5c0ae626c8/freshdesk/v2/api.py#L227-L235 | null | class ContactAPI(object):
def __init__(self, api):
self._api = api
def list_contacts(self, **kwargs):
"""
List all contacts, optionally filtered by a query. Specify filters as
query keyword argument, such as:
email=abc@xyz.com,
mobile=1234567890,
phone=1234567890,
contacts can be filtered by state and company_id such as:
state=[blocked/deleted/unverified/verified]
company_id=1234
contacts updated after a timestamp can be filtered such as;
_updated_since=2018-01-19T02:00:00Z
Passing None means that no named filter will be passed to
Freshdesk, which returns list of all contacts
"""
url = 'contacts?'
page = 1 if not 'page' in kwargs else kwargs['page']
per_page = 100 if not 'per_page' in kwargs else kwargs['per_page']
contacts = []
# Skip pagination by looping over each page and adding tickets if 'page' key is not in kwargs.
# else return the requested page and break the loop
while True:
this_page = self._api._get(url + 'page=%d&per_page=%d'
% (page, per_page), kwargs)
contacts += this_page
if len(this_page) < per_page or 'page' in kwargs:
break
page += 1
return [Contact(**c) for c in contacts]
def get_contact(self, contact_id):
url = 'contacts/%d' % contact_id
return Contact(**self._api._get(url))
def update_contact(self, contact_id, **data):
url = 'contacts/%d' % contact_id
return Contact(**self._api._put(url, data=json.dumps(data)))
def soft_delete_contact(self, contact_id):
url = 'contacts/%d' % contact_id
self._api._delete(url)
def restore_contact(self, contact_id):
url = 'contacts/%d/restore' % contact_id
self._api._put(url)
def permanently_delete_contact(self, contact_id, force=True):
url = 'contacts/%d/hard_delete?force=%r' % (contact_id, force)
self._api._delete(url)
def make_agent(self, contact_id, **kwargs):
url = 'contacts/%d/make_agent' % contact_id
data = {
'occasional': False,
'ticket_scope': 2,
}
data.update(kwargs)
contact = self._api._put(url, data=json.dumps(data))
return self._api.agents.get_agent(contact['agent']['id'])
|
sjkingo/python-freshdesk | freshdesk/v2/api.py | AgentAPI.list_agents | python | def list_agents(self, **kwargs):
url = 'agents?'
page = 1 if not 'page' in kwargs else kwargs['page']
per_page = 100 if not 'per_page' in kwargs else kwargs['per_page']
agents = []
# Skip pagination by looping over each page and adding tickets if 'page' key is not in kwargs.
# else return the requested page and break the loop
while True:
this_page = self._api._get(url + 'page=%d&per_page=%d'
% (page, per_page), kwargs)
agents += this_page
if len(this_page) < per_page or 'page' in kwargs:
break
page += 1
return [Agent(**a) for a in agents] | List all agents, optionally filtered by a view. Specify filters as
keyword arguments, such as:
{
email='abc@xyz.com',
phone=873902,
mobile=56523,
state='fulltime'
}
Passing None means that no named filter will be passed to
Freshdesk, which returns list of all agents
Multiple filters are AND'd together. | train | https://github.com/sjkingo/python-freshdesk/blob/39edca5d86e73de5619b1d082d9d8b5c0ae626c8/freshdesk/v2/api.py#L325-L358 | null | class AgentAPI(object):
def __init__(self, api):
self._api = api
def get_agent(self, agent_id):
"""Fetches the agent for the given agent ID"""
url = 'agents/%s' % agent_id
return Agent(**self._api._get(url))
def update_agent(self, agent_id, **kwargs):
"""Updates an agent"""
url = 'agents/%s' % agent_id
agent = self._api._put(url, data=json.dumps(kwargs))
return Agent(**agent)
def delete_agent(self, agent_id):
"""Delete the agent for the given agent ID"""
url = 'agents/%d' % agent_id
self._api._delete(url)
def currently_authenticated_agent(self):
"""Fetches currently logged in agent"""
url = 'agents/me'
return Agent(**self._api._get(url))
|
sjkingo/python-freshdesk | freshdesk/v2/api.py | AgentAPI.get_agent | python | def get_agent(self, agent_id):
url = 'agents/%s' % agent_id
return Agent(**self._api._get(url)) | Fetches the agent for the given agent ID | train | https://github.com/sjkingo/python-freshdesk/blob/39edca5d86e73de5619b1d082d9d8b5c0ae626c8/freshdesk/v2/api.py#L360-L363 | null | class AgentAPI(object):
def __init__(self, api):
self._api = api
def list_agents(self, **kwargs):
"""List all agents, optionally filtered by a view. Specify filters as
keyword arguments, such as:
{
email='abc@xyz.com',
phone=873902,
mobile=56523,
state='fulltime'
}
Passing None means that no named filter will be passed to
Freshdesk, which returns list of all agents
Multiple filters are AND'd together.
"""
url = 'agents?'
page = 1 if not 'page' in kwargs else kwargs['page']
per_page = 100 if not 'per_page' in kwargs else kwargs['per_page']
agents = []
# Skip pagination by looping over each page and adding tickets if 'page' key is not in kwargs.
# else return the requested page and break the loop
while True:
this_page = self._api._get(url + 'page=%d&per_page=%d'
% (page, per_page), kwargs)
agents += this_page
if len(this_page) < per_page or 'page' in kwargs:
break
page += 1
return [Agent(**a) for a in agents]
def update_agent(self, agent_id, **kwargs):
"""Updates an agent"""
url = 'agents/%s' % agent_id
agent = self._api._put(url, data=json.dumps(kwargs))
return Agent(**agent)
def delete_agent(self, agent_id):
"""Delete the agent for the given agent ID"""
url = 'agents/%d' % agent_id
self._api._delete(url)
def currently_authenticated_agent(self):
"""Fetches currently logged in agent"""
url = 'agents/me'
return Agent(**self._api._get(url))
|
sjkingo/python-freshdesk | freshdesk/v2/api.py | AgentAPI.update_agent | python | def update_agent(self, agent_id, **kwargs):
url = 'agents/%s' % agent_id
agent = self._api._put(url, data=json.dumps(kwargs))
return Agent(**agent) | Updates an agent | train | https://github.com/sjkingo/python-freshdesk/blob/39edca5d86e73de5619b1d082d9d8b5c0ae626c8/freshdesk/v2/api.py#L365-L369 | null | class AgentAPI(object):
def __init__(self, api):
self._api = api
def list_agents(self, **kwargs):
"""List all agents, optionally filtered by a view. Specify filters as
keyword arguments, such as:
{
email='abc@xyz.com',
phone=873902,
mobile=56523,
state='fulltime'
}
Passing None means that no named filter will be passed to
Freshdesk, which returns list of all agents
Multiple filters are AND'd together.
"""
url = 'agents?'
page = 1 if not 'page' in kwargs else kwargs['page']
per_page = 100 if not 'per_page' in kwargs else kwargs['per_page']
agents = []
# Skip pagination by looping over each page and adding tickets if 'page' key is not in kwargs.
# else return the requested page and break the loop
while True:
this_page = self._api._get(url + 'page=%d&per_page=%d'
% (page, per_page), kwargs)
agents += this_page
if len(this_page) < per_page or 'page' in kwargs:
break
page += 1
return [Agent(**a) for a in agents]
def get_agent(self, agent_id):
"""Fetches the agent for the given agent ID"""
url = 'agents/%s' % agent_id
return Agent(**self._api._get(url))
def delete_agent(self, agent_id):
"""Delete the agent for the given agent ID"""
url = 'agents/%d' % agent_id
self._api._delete(url)
def currently_authenticated_agent(self):
"""Fetches currently logged in agent"""
url = 'agents/me'
return Agent(**self._api._get(url))
|
sjkingo/python-freshdesk | freshdesk/v2/api.py | API._get | python | def _get(self, url, params={}):
req = self._session.get(self._api_prefix + url, params=params)
return self._action(req) | Wrapper around request.get() to use the API prefix. Returns a JSON response. | train | https://github.com/sjkingo/python-freshdesk/blob/39edca5d86e73de5619b1d082d9d8b5c0ae626c8/freshdesk/v2/api.py#L442-L445 | null | class API(object):
def __init__(self, domain, api_key, verify=True, proxies=None):
"""Creates a wrapper to perform API actions.
Arguments:
domain: the Freshdesk domain (not custom). e.g. company.freshdesk.com
api_key: the API key
Instances:
.tickets: the Ticket API
"""
self._api_prefix = 'https://{}/api/v2/'.format(domain.rstrip('/'))
self._session = requests.Session()
self._session.auth = (api_key, 'unused_with_api_key')
self._session.verify = verify
self._session.proxies = proxies
self._session.headers = {'Content-Type': 'application/json'}
self.tickets = TicketAPI(self)
self.comments = CommentAPI(self)
self.contacts = ContactAPI(self)
self.companies = CompanyAPI(self)
self.groups = GroupAPI(self)
self.customers = CustomerAPI(self)
self.agents = AgentAPI(self)
self.roles = RoleAPI(self)
self.ticket_fields = TicketFieldAPI(self)
if domain.find('freshdesk.com') < 0:
raise AttributeError('Freshdesk v2 API works only via Freshdesk'
'domains and not via custom CNAMEs')
self.domain = domain
def _action(self, req):
try:
j = req.json()
except:
req.raise_for_status()
j = {}
if 'Retry-After' in req.headers:
raise HTTPError('429 Rate Limit Exceeded: API rate-limit has been reached until {} seconds.'
'See http://freshdesk.com/api#ratelimit'.format(req.headers['Retry-After']))
if 'code' in j and j['code'] == "invalid_credentials":
raise HTTPError('401 Unauthorized: Please login with correct credentials')
if 'errors' in j:
raise HTTPError('{}: {}'.format(j.get('description'),
j.get('errors')))
# Catch any other errors
try:
req.raise_for_status()
except Exception as e:
raise HTTPError("{}: {}".format(e, j))
return j
def _post(self, url, data={}, **kwargs):
"""Wrapper around request.post() to use the API prefix. Returns a JSON response."""
if 'files' in kwargs:
req = self._session.post(self._api_prefix + url, auth=self._session.auth, data=data, **kwargs)
return self._action(req)
req = self._session.post(self._api_prefix + url, data=data, **kwargs)
return self._action(req)
def _put(self, url, data={}):
"""Wrapper around request.put() to use the API prefix. Returns a JSON response."""
req = self._session.put(self._api_prefix + url, data=data)
return self._action(req)
def _delete(self, url):
"""Wrapper around request.delete() to use the API prefix. Returns a JSON response."""
req = self._session.delete(self._api_prefix + url)
return self._action(req)
|
sjkingo/python-freshdesk | freshdesk/v2/api.py | API._post | python | def _post(self, url, data={}, **kwargs):
if 'files' in kwargs:
req = self._session.post(self._api_prefix + url, auth=self._session.auth, data=data, **kwargs)
return self._action(req)
req = self._session.post(self._api_prefix + url, data=data, **kwargs)
return self._action(req) | Wrapper around request.post() to use the API prefix. Returns a JSON response. | train | https://github.com/sjkingo/python-freshdesk/blob/39edca5d86e73de5619b1d082d9d8b5c0ae626c8/freshdesk/v2/api.py#L447-L454 | null | class API(object):
def __init__(self, domain, api_key, verify=True, proxies=None):
"""Creates a wrapper to perform API actions.
Arguments:
domain: the Freshdesk domain (not custom). e.g. company.freshdesk.com
api_key: the API key
Instances:
.tickets: the Ticket API
"""
self._api_prefix = 'https://{}/api/v2/'.format(domain.rstrip('/'))
self._session = requests.Session()
self._session.auth = (api_key, 'unused_with_api_key')
self._session.verify = verify
self._session.proxies = proxies
self._session.headers = {'Content-Type': 'application/json'}
self.tickets = TicketAPI(self)
self.comments = CommentAPI(self)
self.contacts = ContactAPI(self)
self.companies = CompanyAPI(self)
self.groups = GroupAPI(self)
self.customers = CustomerAPI(self)
self.agents = AgentAPI(self)
self.roles = RoleAPI(self)
self.ticket_fields = TicketFieldAPI(self)
if domain.find('freshdesk.com') < 0:
raise AttributeError('Freshdesk v2 API works only via Freshdesk'
'domains and not via custom CNAMEs')
self.domain = domain
def _action(self, req):
try:
j = req.json()
except:
req.raise_for_status()
j = {}
if 'Retry-After' in req.headers:
raise HTTPError('429 Rate Limit Exceeded: API rate-limit has been reached until {} seconds.'
'See http://freshdesk.com/api#ratelimit'.format(req.headers['Retry-After']))
if 'code' in j and j['code'] == "invalid_credentials":
raise HTTPError('401 Unauthorized: Please login with correct credentials')
if 'errors' in j:
raise HTTPError('{}: {}'.format(j.get('description'),
j.get('errors')))
# Catch any other errors
try:
req.raise_for_status()
except Exception as e:
raise HTTPError("{}: {}".format(e, j))
return j
def _get(self, url, params={}):
"""Wrapper around request.get() to use the API prefix. Returns a JSON response."""
req = self._session.get(self._api_prefix + url, params=params)
return self._action(req)
def _put(self, url, data={}):
"""Wrapper around request.put() to use the API prefix. Returns a JSON response."""
req = self._session.put(self._api_prefix + url, data=data)
return self._action(req)
def _delete(self, url):
"""Wrapper around request.delete() to use the API prefix. Returns a JSON response."""
req = self._session.delete(self._api_prefix + url)
return self._action(req)
|
sjkingo/python-freshdesk | freshdesk/v2/api.py | API._put | python | def _put(self, url, data={}):
req = self._session.put(self._api_prefix + url, data=data)
return self._action(req) | Wrapper around request.put() to use the API prefix. Returns a JSON response. | train | https://github.com/sjkingo/python-freshdesk/blob/39edca5d86e73de5619b1d082d9d8b5c0ae626c8/freshdesk/v2/api.py#L456-L459 | null | class API(object):
def __init__(self, domain, api_key, verify=True, proxies=None):
"""Creates a wrapper to perform API actions.
Arguments:
domain: the Freshdesk domain (not custom). e.g. company.freshdesk.com
api_key: the API key
Instances:
.tickets: the Ticket API
"""
self._api_prefix = 'https://{}/api/v2/'.format(domain.rstrip('/'))
self._session = requests.Session()
self._session.auth = (api_key, 'unused_with_api_key')
self._session.verify = verify
self._session.proxies = proxies
self._session.headers = {'Content-Type': 'application/json'}
self.tickets = TicketAPI(self)
self.comments = CommentAPI(self)
self.contacts = ContactAPI(self)
self.companies = CompanyAPI(self)
self.groups = GroupAPI(self)
self.customers = CustomerAPI(self)
self.agents = AgentAPI(self)
self.roles = RoleAPI(self)
self.ticket_fields = TicketFieldAPI(self)
if domain.find('freshdesk.com') < 0:
raise AttributeError('Freshdesk v2 API works only via Freshdesk'
'domains and not via custom CNAMEs')
self.domain = domain
def _action(self, req):
try:
j = req.json()
except:
req.raise_for_status()
j = {}
if 'Retry-After' in req.headers:
raise HTTPError('429 Rate Limit Exceeded: API rate-limit has been reached until {} seconds.'
'See http://freshdesk.com/api#ratelimit'.format(req.headers['Retry-After']))
if 'code' in j and j['code'] == "invalid_credentials":
raise HTTPError('401 Unauthorized: Please login with correct credentials')
if 'errors' in j:
raise HTTPError('{}: {}'.format(j.get('description'),
j.get('errors')))
# Catch any other errors
try:
req.raise_for_status()
except Exception as e:
raise HTTPError("{}: {}".format(e, j))
return j
def _get(self, url, params={}):
"""Wrapper around request.get() to use the API prefix. Returns a JSON response."""
req = self._session.get(self._api_prefix + url, params=params)
return self._action(req)
def _post(self, url, data={}, **kwargs):
"""Wrapper around request.post() to use the API prefix. Returns a JSON response."""
if 'files' in kwargs:
req = self._session.post(self._api_prefix + url, auth=self._session.auth, data=data, **kwargs)
return self._action(req)
req = self._session.post(self._api_prefix + url, data=data, **kwargs)
return self._action(req)
def _delete(self, url):
"""Wrapper around request.delete() to use the API prefix. Returns a JSON response."""
req = self._session.delete(self._api_prefix + url)
return self._action(req)
|
sjkingo/python-freshdesk | freshdesk/v2/api.py | API._delete | python | def _delete(self, url):
req = self._session.delete(self._api_prefix + url)
return self._action(req) | Wrapper around request.delete() to use the API prefix. Returns a JSON response. | train | https://github.com/sjkingo/python-freshdesk/blob/39edca5d86e73de5619b1d082d9d8b5c0ae626c8/freshdesk/v2/api.py#L461-L464 | null | class API(object):
def __init__(self, domain, api_key, verify=True, proxies=None):
"""Creates a wrapper to perform API actions.
Arguments:
domain: the Freshdesk domain (not custom). e.g. company.freshdesk.com
api_key: the API key
Instances:
.tickets: the Ticket API
"""
self._api_prefix = 'https://{}/api/v2/'.format(domain.rstrip('/'))
self._session = requests.Session()
self._session.auth = (api_key, 'unused_with_api_key')
self._session.verify = verify
self._session.proxies = proxies
self._session.headers = {'Content-Type': 'application/json'}
self.tickets = TicketAPI(self)
self.comments = CommentAPI(self)
self.contacts = ContactAPI(self)
self.companies = CompanyAPI(self)
self.groups = GroupAPI(self)
self.customers = CustomerAPI(self)
self.agents = AgentAPI(self)
self.roles = RoleAPI(self)
self.ticket_fields = TicketFieldAPI(self)
if domain.find('freshdesk.com') < 0:
raise AttributeError('Freshdesk v2 API works only via Freshdesk'
'domains and not via custom CNAMEs')
self.domain = domain
def _action(self, req):
try:
j = req.json()
except:
req.raise_for_status()
j = {}
if 'Retry-After' in req.headers:
raise HTTPError('429 Rate Limit Exceeded: API rate-limit has been reached until {} seconds.'
'See http://freshdesk.com/api#ratelimit'.format(req.headers['Retry-After']))
if 'code' in j and j['code'] == "invalid_credentials":
raise HTTPError('401 Unauthorized: Please login with correct credentials')
if 'errors' in j:
raise HTTPError('{}: {}'.format(j.get('description'),
j.get('errors')))
# Catch any other errors
try:
req.raise_for_status()
except Exception as e:
raise HTTPError("{}: {}".format(e, j))
return j
def _get(self, url, params={}):
"""Wrapper around request.get() to use the API prefix. Returns a JSON response."""
req = self._session.get(self._api_prefix + url, params=params)
return self._action(req)
def _post(self, url, data={}, **kwargs):
"""Wrapper around request.post() to use the API prefix. Returns a JSON response."""
if 'files' in kwargs:
req = self._session.post(self._api_prefix + url, auth=self._session.auth, data=data, **kwargs)
return self._action(req)
req = self._session.post(self._api_prefix + url, data=data, **kwargs)
return self._action(req)
def _put(self, url, data={}):
"""Wrapper around request.put() to use the API prefix. Returns a JSON response."""
req = self._session.put(self._api_prefix + url, data=data)
return self._action(req)
|
sjkingo/python-freshdesk | freshdesk/v1/api.py | TicketAPI.list_tickets | python | def list_tickets(self, **kwargs):
filter_name = 'all_tickets'
if 'filter_name' in kwargs and kwargs['filter_name'] is not None:
filter_name = kwargs['filter_name']
del kwargs['filter_name']
url = 'helpdesk/tickets/filter/%s?format=json' % filter_name
page = 1
tickets = []
# Skip pagination by looping over each page and adding tickets
while True:
this_page = self._api._get(url + '&page=%d' % page, kwargs)
if len(this_page) == 0:
break
tickets += this_page
page += 1
return [self.get_ticket(t['display_id']) for t in tickets] | List all tickets, optionally filtered by a view. Specify filters as
keyword arguments, such as:
filter_name = one of ['all_tickets', 'new_my_open', 'spam', 'deleted',
None]
(defaults to 'all_tickets'; passing None uses the default)
Multiple filters are AND'd together. | train | https://github.com/sjkingo/python-freshdesk/blob/39edca5d86e73de5619b1d082d9d8b5c0ae626c8/freshdesk/v1/api.py#L34-L62 | null | class TicketAPI(object):
def __init__(self, api):
self._api = api
def create_ticket(self, subject, **kwargs):
url = 'helpdesk/tickets.json'
status = kwargs.get('status', 2)
priority = kwargs.get('priority', 1)
cc_emails = ','.join(kwargs.get('cc_emails', []))
ticket_data = {
'subject': subject,
'status': status,
'priority': priority,
}
ticket_data.update(kwargs)
data = {
'helpdesk_ticket': ticket_data,
'cc_emails': cc_emails,
}
return Ticket(**self._api._post(url, data=data)['helpdesk_ticket'])
def get_ticket(self, ticket_id):
"""Fetches the ticket for the given ticket ID"""
url = 'helpdesk/tickets/%d.json' % ticket_id
return Ticket(**self._api._get(url)['helpdesk_ticket'])
def list_all_tickets(self):
"""List all tickets, closed or open."""
return self.list_tickets(filter_name='all_tickets')
def list_open_tickets(self):
"""List all new and open tickets."""
return self.list_tickets(filter_name='new_my_open')
def list_deleted_tickets(self):
"""Lists all deleted tickets."""
return self.list_tickets(filter_name='deleted')
|
sjkingo/python-freshdesk | freshdesk/v1/api.py | ContactAPI.list_contacts | python | def list_contacts(self, **kwargs):
url = 'contacts.json?'
if 'query' in kwargs.keys():
filter_query = kwargs.pop('query')
url = url + "query={}".format(filter_query)
if 'state' in kwargs.keys():
state_query = kwargs.pop('state')
url = url + "state={}".format(state_query)
if 'letter' in kwargs.keys():
name_query = kwargs.pop('letter')
url = url + "letter={}".format(name_query)
contacts = self._api._get(url)
return [Contact(**c['user']) for c in contacts] | List all contacts, optionally filtered by a query. Specify filters as
query keyword argument, such as:
query= email is abc@xyz.com,
query= mobile is 1234567890,
query= phone is 1234567890,
contacts can be filtered by name such as;
letter=Prenit
Passing None means that no named filter will be passed to
Freshdesk, which returns list of all contacts | train | https://github.com/sjkingo/python-freshdesk/blob/39edca5d86e73de5619b1d082d9d8b5c0ae626c8/freshdesk/v1/api.py#L81-L113 | null | class ContactAPI(object):
def __init__(self, api):
self._api = api
def list_contacts(self, **kwargs):
"""
List all contacts, optionally filtered by a query. Specify filters as
query keyword argument, such as:
query= email is abc@xyz.com,
query= mobile is 1234567890,
query= phone is 1234567890,
contacts can be filtered by name such as;
letter=Prenit
Passing None means that no named filter will be passed to
Freshdesk, which returns list of all contacts
"""
url = 'contacts.json?'
if 'query' in kwargs.keys():
filter_query = kwargs.pop('query')
url = url + "query={}".format(filter_query)
if 'state' in kwargs.keys():
state_query = kwargs.pop('state')
url = url + "state={}".format(state_query)
if 'letter' in kwargs.keys():
name_query = kwargs.pop('letter')
url = url + "letter={}".format(name_query)
contacts = self._api._get(url)
return [Contact(**c['user']) for c in contacts]
def create_contact(self, *args, **kwargs):
"""Creates a contact"""
url = 'contacts.json'
contact_data = {
'active': True,
'helpdesk_agent': False,
'description': 'Freshdesk Contact'
}
contact_data.update(kwargs)
payload = {
'user': contact_data
}
return Contact(**self._api._post(url, data=payload)['user'])
def make_agent(self, contact_id):
url = 'contacts/%d/make_agent.json' % contact_id
agent = self._api._put(url)['agent']
return self._api.agents.get_agent(agent['id'])
def get_contact(self, contact_id):
url = 'contacts/%d.json' % contact_id
return Contact(**self._api._get(url)['user'])
def delete_contact(self, contact_id):
url = 'contacts/%d.json' % contact_id
self._api._delete(url)
|
sjkingo/python-freshdesk | freshdesk/v1/api.py | ContactAPI.create_contact | python | def create_contact(self, *args, **kwargs):
url = 'contacts.json'
contact_data = {
'active': True,
'helpdesk_agent': False,
'description': 'Freshdesk Contact'
}
contact_data.update(kwargs)
payload = {
'user': contact_data
}
return Contact(**self._api._post(url, data=payload)['user']) | Creates a contact | train | https://github.com/sjkingo/python-freshdesk/blob/39edca5d86e73de5619b1d082d9d8b5c0ae626c8/freshdesk/v1/api.py#L115-L128 | null | class ContactAPI(object):
def __init__(self, api):
self._api = api
def list_contacts(self, **kwargs):
"""
List all contacts, optionally filtered by a query. Specify filters as
query keyword argument, such as:
query= email is abc@xyz.com,
query= mobile is 1234567890,
query= phone is 1234567890,
contacts can be filtered by name such as;
letter=Prenit
Passing None means that no named filter will be passed to
Freshdesk, which returns list of all contacts
"""
url = 'contacts.json?'
if 'query' in kwargs.keys():
filter_query = kwargs.pop('query')
url = url + "query={}".format(filter_query)
if 'state' in kwargs.keys():
state_query = kwargs.pop('state')
url = url + "state={}".format(state_query)
if 'letter' in kwargs.keys():
name_query = kwargs.pop('letter')
url = url + "letter={}".format(name_query)
contacts = self._api._get(url)
return [Contact(**c['user']) for c in contacts]
def make_agent(self, contact_id):
url = 'contacts/%d/make_agent.json' % contact_id
agent = self._api._put(url)['agent']
return self._api.agents.get_agent(agent['id'])
def get_contact(self, contact_id):
url = 'contacts/%d.json' % contact_id
return Contact(**self._api._get(url)['user'])
def delete_contact(self, contact_id):
url = 'contacts/%d.json' % contact_id
self._api._delete(url)
|
sjkingo/python-freshdesk | freshdesk/v1/api.py | AgentAPI.list_agents | python | def list_agents(self, **kwargs):
url = 'agents.json?'
if 'query' in kwargs.keys():
filter_query = kwargs.pop('query')
url = url + "query={}".format(filter_query)
if 'state' in kwargs.keys():
state_query = kwargs.pop('state')
url = url + "state={}".format(state_query)
agents = self._api._get(url)
return [Agent(**a['agent']) for a in agents] | List all agents, optionally filtered by a query. Specify filters as
query keyword argument, such as:
query= email is abc@xyz.com,
query= mobile is 1234567890,
query= phone is 1234567890,
agents can be filtered by state such as:
state=active/occasional
Passing None means that no named filter will be passed to
Freshdesk, which returns list of all agents | train | https://github.com/sjkingo/python-freshdesk/blob/39edca5d86e73de5619b1d082d9d8b5c0ae626c8/freshdesk/v1/api.py#L148-L175 | null | class AgentAPI(object):
def __init__(self, api):
self._api = api
def list_agents(self, **kwargs):
"""List all agents, optionally filtered by a query. Specify filters as
query keyword argument, such as:
query= email is abc@xyz.com,
query= mobile is 1234567890,
query= phone is 1234567890,
agents can be filtered by state such as:
state=active/occasional
Passing None means that no named filter will be passed to
Freshdesk, which returns list of all agents
"""
url = 'agents.json?'
if 'query' in kwargs.keys():
filter_query = kwargs.pop('query')
url = url + "query={}".format(filter_query)
if 'state' in kwargs.keys():
state_query = kwargs.pop('state')
url = url + "state={}".format(state_query)
agents = self._api._get(url)
return [Agent(**a['agent']) for a in agents]
def get_agent(self, agent_id):
"""Fetches the agent for the given agent ID"""
url = 'agents/%s.json' % agent_id
return Agent(**self._api._get(url)['agent'])
def update_agent(self, agent_id, **kwargs):
"""Updates an agent"""
url = 'agents/%s.json' % agent_id
agent = self._api._put(url, data=json.dumps(kwargs))['agent']
return Agent(**agent)
def delete_agent(self, agent_id):
"""Delete the agent for the given agent ID"""
url = 'agents/%d.json' % agent_id
self._api._delete(url)
|
sjkingo/python-freshdesk | freshdesk/v1/api.py | API._get | python | def _get(self, url, params={}):
r = requests.get(self._api_prefix + url,
params=params,
headers=self.headers,
auth=self.auth,
)
return self._action(r) | Wrapper around request.get() to use the API prefix. Returns a JSON response. | train | https://github.com/sjkingo/python-freshdesk/blob/39edca5d86e73de5619b1d082d9d8b5c0ae626c8/freshdesk/v1/api.py#L255-L262 | null | class API(object):
def __init__(self, domain, api_key):
"""Creates a wrapper to perform API actions.
Arguments:
domain: the Freshdesk domain (not custom). e.g. company.freshdesk.com
api_key: the API key
Instances:
.tickets: the Ticket API
"""
self._api_prefix = 'https://{}/'.format(domain.rstrip('/'))
self.auth = (api_key, 'X')
self.headers = {'Content-Type': 'application/json'}
self.tickets = TicketAPI(self)
self.contacts = ContactAPI(self)
self.agents = AgentAPI(self)
self.timesheets = TimeAPI(self)
self.customers = CustomerAPI(self)
def _post(self, url, data={}):
"""Wrapper around request.post() to use the API prefix. Returns a JSON response."""
r = requests.post(self._api_prefix + url,
data=json.dumps(data),
headers=self.headers,
auth=self.auth,
allow_redirects=False,
)
return self._action(r)
def _put(self, url, data={}):
"""Wrapper around request.put() to use the API prefix. Returns a JSON response."""
r = requests.put(self._api_prefix + url,
data=json.dumps(data),
headers=self.headers,
auth=self.auth,
allow_redirects=False,
)
return self._action(r)
def _delete(self, url):
"""Wrapper around request.delete() to use the API prefix. Returns a JSON response."""
r = requests.delete(self._api_prefix + url,
headers=self.headers,
auth=self.auth,
allow_redirects=False,
)
return self._action(r)
def _action(self, res):
"""Returns JSON response or raise exception if errors are present"""
try:
j = res.json()
except:
res.raise_for_status()
j = {}
if 'Retry-After' in res.headers:
raise HTTPError('403 Forbidden: API rate-limit has been reached until {}.'
'See http://freshdesk.com/api#ratelimit'.format(res.headers['Retry-After']))
if 'require_login' in j:
raise HTTPError('403 Forbidden: API key is incorrect for this domain')
if 'error' in j:
raise HTTPError('{}: {}'.format(j.get('description'),
j.get('errors')))
# Catch any other errors
try:
res.raise_for_status()
except Exception as e:
raise HTTPError("{}: {}".format(e, j))
return j
|
sjkingo/python-freshdesk | freshdesk/v1/api.py | API._post | python | def _post(self, url, data={}):
r = requests.post(self._api_prefix + url,
data=json.dumps(data),
headers=self.headers,
auth=self.auth,
allow_redirects=False,
)
return self._action(r) | Wrapper around request.post() to use the API prefix. Returns a JSON response. | train | https://github.com/sjkingo/python-freshdesk/blob/39edca5d86e73de5619b1d082d9d8b5c0ae626c8/freshdesk/v1/api.py#L264-L272 | null | class API(object):
def __init__(self, domain, api_key):
"""Creates a wrapper to perform API actions.
Arguments:
domain: the Freshdesk domain (not custom). e.g. company.freshdesk.com
api_key: the API key
Instances:
.tickets: the Ticket API
"""
self._api_prefix = 'https://{}/'.format(domain.rstrip('/'))
self.auth = (api_key, 'X')
self.headers = {'Content-Type': 'application/json'}
self.tickets = TicketAPI(self)
self.contacts = ContactAPI(self)
self.agents = AgentAPI(self)
self.timesheets = TimeAPI(self)
self.customers = CustomerAPI(self)
def _get(self, url, params={}):
"""Wrapper around request.get() to use the API prefix. Returns a JSON response."""
r = requests.get(self._api_prefix + url,
params=params,
headers=self.headers,
auth=self.auth,
)
return self._action(r)
def _put(self, url, data={}):
"""Wrapper around request.put() to use the API prefix. Returns a JSON response."""
r = requests.put(self._api_prefix + url,
data=json.dumps(data),
headers=self.headers,
auth=self.auth,
allow_redirects=False,
)
return self._action(r)
def _delete(self, url):
"""Wrapper around request.delete() to use the API prefix. Returns a JSON response."""
r = requests.delete(self._api_prefix + url,
headers=self.headers,
auth=self.auth,
allow_redirects=False,
)
return self._action(r)
def _action(self, res):
"""Returns JSON response or raise exception if errors are present"""
try:
j = res.json()
except:
res.raise_for_status()
j = {}
if 'Retry-After' in res.headers:
raise HTTPError('403 Forbidden: API rate-limit has been reached until {}.'
'See http://freshdesk.com/api#ratelimit'.format(res.headers['Retry-After']))
if 'require_login' in j:
raise HTTPError('403 Forbidden: API key is incorrect for this domain')
if 'error' in j:
raise HTTPError('{}: {}'.format(j.get('description'),
j.get('errors')))
# Catch any other errors
try:
res.raise_for_status()
except Exception as e:
raise HTTPError("{}: {}".format(e, j))
return j
|
sjkingo/python-freshdesk | freshdesk/v1/api.py | API._put | python | def _put(self, url, data={}):
r = requests.put(self._api_prefix + url,
data=json.dumps(data),
headers=self.headers,
auth=self.auth,
allow_redirects=False,
)
return self._action(r) | Wrapper around request.put() to use the API prefix. Returns a JSON response. | train | https://github.com/sjkingo/python-freshdesk/blob/39edca5d86e73de5619b1d082d9d8b5c0ae626c8/freshdesk/v1/api.py#L274-L282 | null | class API(object):
def __init__(self, domain, api_key):
"""Creates a wrapper to perform API actions.
Arguments:
domain: the Freshdesk domain (not custom). e.g. company.freshdesk.com
api_key: the API key
Instances:
.tickets: the Ticket API
"""
self._api_prefix = 'https://{}/'.format(domain.rstrip('/'))
self.auth = (api_key, 'X')
self.headers = {'Content-Type': 'application/json'}
self.tickets = TicketAPI(self)
self.contacts = ContactAPI(self)
self.agents = AgentAPI(self)
self.timesheets = TimeAPI(self)
self.customers = CustomerAPI(self)
def _get(self, url, params={}):
"""Wrapper around request.get() to use the API prefix. Returns a JSON response."""
r = requests.get(self._api_prefix + url,
params=params,
headers=self.headers,
auth=self.auth,
)
return self._action(r)
def _post(self, url, data={}):
"""Wrapper around request.post() to use the API prefix. Returns a JSON response."""
r = requests.post(self._api_prefix + url,
data=json.dumps(data),
headers=self.headers,
auth=self.auth,
allow_redirects=False,
)
return self._action(r)
def _delete(self, url):
"""Wrapper around request.delete() to use the API prefix. Returns a JSON response."""
r = requests.delete(self._api_prefix + url,
headers=self.headers,
auth=self.auth,
allow_redirects=False,
)
return self._action(r)
def _action(self, res):
"""Returns JSON response or raise exception if errors are present"""
try:
j = res.json()
except:
res.raise_for_status()
j = {}
if 'Retry-After' in res.headers:
raise HTTPError('403 Forbidden: API rate-limit has been reached until {}.'
'See http://freshdesk.com/api#ratelimit'.format(res.headers['Retry-After']))
if 'require_login' in j:
raise HTTPError('403 Forbidden: API key is incorrect for this domain')
if 'error' in j:
raise HTTPError('{}: {}'.format(j.get('description'),
j.get('errors')))
# Catch any other errors
try:
res.raise_for_status()
except Exception as e:
raise HTTPError("{}: {}".format(e, j))
return j
|
sjkingo/python-freshdesk | freshdesk/v1/api.py | API._delete | python | def _delete(self, url):
r = requests.delete(self._api_prefix + url,
headers=self.headers,
auth=self.auth,
allow_redirects=False,
)
return self._action(r) | Wrapper around request.delete() to use the API prefix. Returns a JSON response. | train | https://github.com/sjkingo/python-freshdesk/blob/39edca5d86e73de5619b1d082d9d8b5c0ae626c8/freshdesk/v1/api.py#L284-L291 | null | class API(object):
def __init__(self, domain, api_key):
"""Creates a wrapper to perform API actions.
Arguments:
domain: the Freshdesk domain (not custom). e.g. company.freshdesk.com
api_key: the API key
Instances:
.tickets: the Ticket API
"""
self._api_prefix = 'https://{}/'.format(domain.rstrip('/'))
self.auth = (api_key, 'X')
self.headers = {'Content-Type': 'application/json'}
self.tickets = TicketAPI(self)
self.contacts = ContactAPI(self)
self.agents = AgentAPI(self)
self.timesheets = TimeAPI(self)
self.customers = CustomerAPI(self)
def _get(self, url, params={}):
"""Wrapper around request.get() to use the API prefix. Returns a JSON response."""
r = requests.get(self._api_prefix + url,
params=params,
headers=self.headers,
auth=self.auth,
)
return self._action(r)
def _post(self, url, data={}):
"""Wrapper around request.post() to use the API prefix. Returns a JSON response."""
r = requests.post(self._api_prefix + url,
data=json.dumps(data),
headers=self.headers,
auth=self.auth,
allow_redirects=False,
)
return self._action(r)
def _put(self, url, data={}):
"""Wrapper around request.put() to use the API prefix. Returns a JSON response."""
r = requests.put(self._api_prefix + url,
data=json.dumps(data),
headers=self.headers,
auth=self.auth,
allow_redirects=False,
)
return self._action(r)
def _action(self, res):
"""Returns JSON response or raise exception if errors are present"""
try:
j = res.json()
except:
res.raise_for_status()
j = {}
if 'Retry-After' in res.headers:
raise HTTPError('403 Forbidden: API rate-limit has been reached until {}.'
'See http://freshdesk.com/api#ratelimit'.format(res.headers['Retry-After']))
if 'require_login' in j:
raise HTTPError('403 Forbidden: API key is incorrect for this domain')
if 'error' in j:
raise HTTPError('{}: {}'.format(j.get('description'),
j.get('errors')))
# Catch any other errors
try:
res.raise_for_status()
except Exception as e:
raise HTTPError("{}: {}".format(e, j))
return j
|
sjkingo/python-freshdesk | freshdesk/v1/api.py | API._action | python | def _action(self, res):
try:
j = res.json()
except:
res.raise_for_status()
j = {}
if 'Retry-After' in res.headers:
raise HTTPError('403 Forbidden: API rate-limit has been reached until {}.'
'See http://freshdesk.com/api#ratelimit'.format(res.headers['Retry-After']))
if 'require_login' in j:
raise HTTPError('403 Forbidden: API key is incorrect for this domain')
if 'error' in j:
raise HTTPError('{}: {}'.format(j.get('description'),
j.get('errors')))
# Catch any other errors
try:
res.raise_for_status()
except Exception as e:
raise HTTPError("{}: {}".format(e, j))
return j | Returns JSON response or raise exception if errors are present | train | https://github.com/sjkingo/python-freshdesk/blob/39edca5d86e73de5619b1d082d9d8b5c0ae626c8/freshdesk/v1/api.py#L293-L318 | null | class API(object):
def __init__(self, domain, api_key):
"""Creates a wrapper to perform API actions.
Arguments:
domain: the Freshdesk domain (not custom). e.g. company.freshdesk.com
api_key: the API key
Instances:
.tickets: the Ticket API
"""
self._api_prefix = 'https://{}/'.format(domain.rstrip('/'))
self.auth = (api_key, 'X')
self.headers = {'Content-Type': 'application/json'}
self.tickets = TicketAPI(self)
self.contacts = ContactAPI(self)
self.agents = AgentAPI(self)
self.timesheets = TimeAPI(self)
self.customers = CustomerAPI(self)
def _get(self, url, params={}):
"""Wrapper around request.get() to use the API prefix. Returns a JSON response."""
r = requests.get(self._api_prefix + url,
params=params,
headers=self.headers,
auth=self.auth,
)
return self._action(r)
def _post(self, url, data={}):
"""Wrapper around request.post() to use the API prefix. Returns a JSON response."""
r = requests.post(self._api_prefix + url,
data=json.dumps(data),
headers=self.headers,
auth=self.auth,
allow_redirects=False,
)
return self._action(r)
def _put(self, url, data={}):
"""Wrapper around request.put() to use the API prefix. Returns a JSON response."""
r = requests.put(self._api_prefix + url,
data=json.dumps(data),
headers=self.headers,
auth=self.auth,
allow_redirects=False,
)
return self._action(r)
def _delete(self, url):
"""Wrapper around request.delete() to use the API prefix. Returns a JSON response."""
r = requests.delete(self._api_prefix + url,
headers=self.headers,
auth=self.auth,
allow_redirects=False,
)
return self._action(r)
|
pysal/esda | esda/join_counts.py | Join_Counts.by_col | python | def by_col(cls, df, cols, w=None, inplace=False, pvalue='sim', outvals=None, **stat_kws):
if outvals is None:
outvals = []
outvals.extend(['bb', 'p_sim_bw', 'p_sim_bb'])
pvalue = ''
return _univariate_handler(df, cols, w=w, inplace=inplace, pvalue=pvalue,
outvals=outvals, stat=cls,
swapname='bw', **stat_kws) | Function to compute a Join_Count statistic on a dataframe
Arguments
---------
df : pandas.DataFrame
a pandas dataframe with a geometry column
cols : string or list of string
name or list of names of columns to use to compute the statistic
w : pysal weights object
a weights object aligned with the dataframe. If not provided, this
is searched for in the dataframe's metadata
inplace : bool
a boolean denoting whether to operate on the dataframe inplace or to
return a series contaning the results of the computation. If
operating inplace, the derived columns will be named
'column_join_count'
pvalue : string
a string denoting which pvalue should be returned. Refer to the
the Join_Count statistic's documentation for available p-values
outvals : list of strings
list of arbitrary attributes to return as columns from the
Join_Count statistic
**stat_kws : keyword arguments
options to pass to the underlying statistic. For this, see the
documentation for the Join_Count statistic.
Returns
--------
If inplace, None, and operation is conducted on dataframe in memory. Otherwise,
returns a copy of the dataframe with the relevant columns attached.
See Also
---------
For further documentation, refer to the Join_Count class in pysal.esda | train | https://github.com/pysal/esda/blob/2fafc6ec505e153152a86601d3e0fba080610c20/esda/join_counts.py#L171-L214 | [
"def _univariate_handler(df, cols, stat=None, w=None, inplace=True,\n pvalue = 'sim', outvals = None, swapname='', **kwargs):\n \"\"\"\n Compute a univariate descriptive statistic `stat` over columns `cols` in\n `df`.\n\n Parameters\n ----------\n df : pandas.DataFr... | class Join_Counts(object):
"""Binary Join Counts
Parameters
----------
y : array
binary variable measured across n spatial units
w : W
spatial weights instance
permutations : int
number of random permutations for calculation of pseudo-p_values
Attributes
----------
y : array
original variable
w : W
original w object
permutations : int
number of permutations
bb : float
number of black-black joins
ww : float
number of white-white joins
bw : float
number of black-white joins
J : float
number of joins
sim_bb : array
(if permutations>0)
vector of bb values for permuted samples
p_sim_bb : array
(if permutations>0)
p-value based on permutations (one-sided)
null: spatial randomness
alternative: the observed bb is greater than under randomness
mean_bb : float
average of permuted bb values
min_bb : float
minimum of permuted bb values
max_bb : float
maximum of permuted bb values
sim_bw : array
(if permutations>0)
vector of bw values for permuted samples
p_sim_bw : array
(if permutations>0)
p-value based on permutations (one-sided)
null: spatial randomness
alternative: the observed bw is greater than under randomness
mean_bw : float
average of permuted bw values
min_bw : float
minimum of permuted bw values
max_bw : float
maximum of permuted bw values
Examples
--------
Replicate example from anselin and rey
>>> import numpy as np
>>> import libpysal
>>> w = libpysal.weights.lat2W(4, 4)
>>> y = np.ones(16)
>>> y[0:8] = 0
>>> np.random.seed(12345)
>>> from esda.join_counts import Join_Counts
>>> jc = Join_Counts(y, w)
>>> jc.bb
10.0
>>> jc.bw
4.0
>>> jc.ww
10.0
>>> jc.J
24.0
>>> len(jc.sim_bb)
999
>>> round(jc.p_sim_bb, 3)
0.003
>>> round(np.mean(jc.sim_bb), 3)
5.547
>>> np.max(jc.sim_bb)
10.0
>>> np.min(jc.sim_bb)
0.0
>>> len(jc.sim_bw)
999
>>> jc.p_sim_bw
1.0
>>> np.mean(jc.sim_bw)
12.811811811811811
>>> np.max(jc.sim_bw)
24.0
>>> np.min(jc.sim_bw)
7.0
>>>
Notes
-----
Technical details and derivations can be found in :cite:`cliff81`.
"""
def __init__(self, y, w, permutations=PERMUTATIONS):
y = np.asarray(y).flatten()
w.transformation = 'b' # ensure we have binary weights
self.w = w
self.y = y
self.permutations = permutations
self.J = w.s0 / 2.
self.bb, self.ww, self.bw = self.__calc(self.y)
if permutations:
sim = [self.__calc(np.random.permutation(self.y))
for i in range(permutations)]
sim_jc = np.array(sim)
self.sim_bb = sim_jc[:, 0]
self.min_bb = np.min(self.sim_bb)
self.mean_bb = np.mean(self.sim_bb)
self.max_bb = np.max(self.sim_bb)
self.sim_bw = sim_jc[:, 2]
self.min_bw = np.min(self.sim_bw)
self.mean_bw = np.mean(self.sim_bw)
self.max_bw = np.max(self.sim_bw)
p_sim_bb = self.__pseudop(self.sim_bb, self.bb)
p_sim_bw = self.__pseudop(self.sim_bw, self.bw)
self.p_sim_bb = p_sim_bb
self.p_sim_bw = p_sim_bw
def __calc(self, z):
zl = lag_spatial(self.w, z)
bb = sum(z * zl) / 2.0
zw = 1 - z
zl = lag_spatial(self.w, zw)
ww = sum(zw * zl) / 2.0
bw = self.J - (bb + ww)
return (bb, ww, bw)
def __pseudop(self, sim, jc):
above = sim >= jc
larger = sum(above)
psim = (larger + 1.) / (self.permutations + 1.)
return psim
@property
def _statistic(self):
return self.bw
@classmethod
|
pysal/esda | esda/moran.py | Moran_BV_matrix | python | def Moran_BV_matrix(variables, w, permutations=0, varnames=None):
try:
# check if pandas is installed
import pandas
if isinstance(variables, pandas.DataFrame):
# if yes use variables as df and convert to numpy_array
varnames = pandas.Index.tolist(variables.columns)
variables_n = []
for var in varnames:
variables_n.append(variables[str(var)].values)
else:
variables_n = variables
except ImportError:
variables_n = variables
results = _Moran_BV_Matrix_array(variables=variables_n, w=w,
permutations=permutations,
varnames=varnames)
return results | Bivariate Moran Matrix
Calculates bivariate Moran between all pairs of a set of variables.
Parameters
----------
variables : array or pandas.DataFrame
sequence of variables to be assessed
w : W
a spatial weights object
permutations : int
number of permutations
varnames : list, optional if variables is an array
Strings for variable names. Will add an
attribute to `Moran_BV` objects in results needed for plotting
in `splot` or `.plot()`. Default =None.
Note: If variables is a `pandas.DataFrame` varnames
will automatically be generated
Returns
-------
results : dictionary
(i, j) is the key for the pair of variables, values are
the Moran_BV objects.
Examples
--------
open dbf
>>> import libpysal
>>> f = libpysal.io.open(libpysal.examples.get_path("sids2.dbf"))
pull of selected variables from dbf and create numpy arrays for each
>>> varnames = ['SIDR74', 'SIDR79', 'NWR74', 'NWR79']
>>> vars = [np.array(f.by_col[var]) for var in varnames]
create a contiguity matrix from an external gal file
>>> w = libpysal.io.open(libpysal.examples.get_path("sids2.gal")).read()
create an instance of Moran_BV_matrix
>>> from esda.moran import Moran_BV_matrix
>>> res = Moran_BV_matrix(vars, w, varnames = varnames)
check values
>>> round(res[(0, 1)].I,7)
0.1936261
>>> round(res[(3, 0)].I,7)
0.3770138 | train | https://github.com/pysal/esda/blob/2fafc6ec505e153152a86601d3e0fba080610c20/esda/moran.py#L464-L537 | [
"def _Moran_BV_Matrix_array(variables, w, permutations=0, varnames=None):\n \"\"\"\n Base calculation for MORAN_BV_Matrix\n \"\"\"\n if varnames is None:\n varnames = ['x{}'.format(i) for i in range(k)]\n\n k = len(variables)\n rk = list(range(0, k - 1))\n results = {}\n for i in rk:\... | """
Moran's I Spatial Autocorrelation Statistics
"""
__author__ = "Sergio J. Rey <srey@asu.edu>, \
Dani Arribas-Bel <daniel.arribas.bel@gmail.com>"
from libpysal.weights.spatial_lag import lag_spatial as slag
from .smoothing import assuncao_rate
from .tabular import _univariate_handler, _bivariate_handler
import scipy.stats as stats
import numpy as np
__all__ = ["Moran", "Moran_Local", "Moran_BV", "Moran_BV_matrix",
"Moran_Local_BV", "Moran_Rate", "Moran_Local_Rate"]
PERMUTATIONS = 999
class Moran(object):
    """Moran's I Global Autocorrelation Statistic

    Parameters
    ----------
    y : array
        variable measured across n spatial units
    w : W
        spatial weights instance
    transformation : string
        weights transformation, default is row-standardized "r".
        Other options include "B": binary, "D":
        doubly-standardized, "U": untransformed
        (general weights), "V": variance-stabilizing.
    permutations : int
        number of random permutations for calculation of
        pseudo-p_values
    two_tailed : boolean
        If True (default) analytical p-values for Moran are two
        tailed, otherwise if False, they are one-tailed.

    Attributes
    ----------
    y : array
        original variable
    w : W
        original w object
    permutations : int
        number of permutations
    I : float
        value of Moran's I
    EI : float
        expected value under normality assumption
    VI_norm : float
        variance of I under normality assumption
    seI_norm : float
        standard deviation of I under normality assumption
    z_norm : float
        z-value of I under normality assumption
    p_norm : float
        p-value of I under normality assumption
    VI_rand : float
        variance of I under randomization assumption
    seI_rand : float
        standard deviation of I under randomization assumption
    z_rand : float
        z-value of I under randomization assumption
    p_rand : float
        p-value of I under randomization assumption
    two_tailed : boolean
        If True p_norm and p_rand are two-tailed, otherwise they
        are one-tailed.
    sim : array
        (if permutations>0)
        vector of I values for permuted samples
    p_sim : array
        (if permutations>0)
        p-value based on permutations (one-tailed)
        null: spatial randomness
        alternative: the observed I is extreme if
        it is either extremely greater or extremely lower
        than the values obtained based on permutations
    EI_sim : float
        (if permutations>0)
        average value of I from permutations
    VI_sim : float
        (if permutations>0)
        variance of I from permutations
    seI_sim : float
        (if permutations>0)
        standard deviation of I under permutations.
    z_sim : float
        (if permutations>0)
        standardized I based on permutations
    p_z_sim : float
        (if permutations>0)
        p-value based on standard normal approximation from
        permutations

    Notes
    -----
    Technical details and derivations can be found in :cite:`cliff81`.

    Examples
    --------
    >>> import libpysal
    >>> w = libpysal.io.open(libpysal.examples.get_path("stl.gal")).read()
    >>> f = libpysal.io.open(libpysal.examples.get_path("stl_hom.txt"))
    >>> y = np.array(f.by_col['HR8893'])
    >>> from esda.moran import Moran
    >>> mi = Moran(y, w)
    >>> round(mi.I, 3)
    0.244
    >>> mi.EI
    -0.012987012987012988
    >>> mi.p_norm
    0.00027147862770937614

    SIDS example replicating OpenGeoda

    >>> w = libpysal.io.open(libpysal.examples.get_path("sids2.gal")).read()
    >>> f = libpysal.io.open(libpysal.examples.get_path("sids2.dbf"))
    >>> SIDR = np.array(f.by_col("SIDR74"))
    >>> mi = Moran(SIDR, w)
    >>> round(mi.I, 3)
    0.248
    >>> mi.p_norm
    0.0001158330781489969

    One-tailed

    >>> mi_1 = Moran(SIDR, w, two_tailed=False)
    >>> round(mi_1.I, 3)
    0.248
    >>> round(mi_1.p_norm, 4)
    0.0001
    """

    def __init__(self, y, w, transformation="r", permutations=PERMUTATIONS,
                 two_tailed=True):
        y = np.asarray(y).flatten()
        self.y = y
        w.transform = transformation
        self.w = w
        self.permutations = permutations
        self.__moments()
        self.I = self.__calc(self.z)
        # analytical inference under the normality and randomization nulls
        self.z_norm = (self.I - self.EI) / self.seI_norm
        self.z_rand = (self.I - self.EI) / self.seI_rand
        if self.z_norm > 0:
            self.p_norm = 1 - stats.norm.cdf(self.z_norm)
            self.p_rand = 1 - stats.norm.cdf(self.z_rand)
        else:
            self.p_norm = stats.norm.cdf(self.z_norm)
            self.p_rand = stats.norm.cdf(self.z_rand)
        if two_tailed:
            self.p_norm *= 2.
            self.p_rand *= 2.
        if permutations:
            # pseudo inference: recompute I under random relabelings
            sim = [self.__calc(np.random.permutation(self.z))
                   for i in range(permutations)]
            self.sim = sim = np.array(sim)
            above = sim >= self.I
            larger = above.sum()
            # fold to the smaller tail so the pseudo p is one-tailed
            if (self.permutations - larger) < larger:
                larger = self.permutations - larger
            self.p_sim = (larger + 1.) / (permutations + 1.)
            self.EI_sim = sim.sum() / permutations
            self.seI_sim = np.array(sim).std()
            self.VI_sim = self.seI_sim ** 2
            self.z_sim = (self.I - self.EI_sim) / self.seI_sim
            if self.z_sim > 0:
                self.p_z_sim = 1 - stats.norm.cdf(self.z_sim)
            else:
                self.p_z_sim = stats.norm.cdf(self.z_sim)
        # provide .z attribute that is znormalized
        sy = y.std()
        self.z /= sy

    def __moments(self):
        # deviations from the mean and the moments needed for inference
        self.n = len(self.y)
        y = self.y
        z = y - y.mean()
        self.z = z
        self.z2ss = (z * z).sum()
        self.EI = -1. / (self.n - 1)
        n = self.n
        n2 = n * n
        s1 = self.w.s1
        s0 = self.w.s0
        s2 = self.w.s2
        s02 = s0 * s0
        # variance of I under the normality assumption
        v_num = n2 * s1 - n * s2 + 3 * s02
        v_den = (n - 1) * (n + 1) * s02
        self.VI_norm = v_num / v_den - (1.0 / (n - 1)) ** 2
        self.seI_norm = self.VI_norm ** (1 / 2.)
        # variance under randomization (depends on sample kurtosis k)
        xd4 = z**4
        xd2 = z**2
        k_num = xd4.sum() / n
        k_den = (xd2.sum() / n)**2
        k = k_num / k_den
        EI = self.EI
        A = n * ((n2 - 3 * n + 3) * s1 - n * s2 + 3 * s02)
        B = k * ((n2 - n) * s1 - 2 * n * s2 + 6 * s02)
        VIR = (A - B) / ((n - 1) * (n - 2) * (n - 3) * s02) - EI * EI
        self.VI_rand = VIR
        self.seI_rand = VIR ** (1 / 2.)

    def __calc(self, z):
        # Moran's I for mean deviations z: (n / s0) * (z'Wz / z'z)
        zl = slag(self.w, z)
        inum = (z * zl).sum()
        return self.n / self.w.s0 * inum / self.z2ss

    @property
    def _statistic(self):
        """More consistent hidden attribute to access ESDA statistics"""
        return self.I

    @classmethod
    def by_col(cls, df, cols, w=None, inplace=False, pvalue='sim', outvals=None, **stat_kws):
        """
        Function to compute a Moran statistic on a dataframe

        Arguments
        ---------
        df : pandas.DataFrame
            a pandas dataframe with a geometry column
        cols : string or list of string
            name or list of names of columns to use to compute the statistic
        w : pysal weights object
            a weights object aligned with the dataframe. If not provided, this
            is searched for in the dataframe's metadata
        inplace : bool
            a boolean denoting whether to operate on the dataframe inplace or to
            return a series contaning the results of the computation. If
            operating inplace, the derived columns will be named
            'column_moran'
        pvalue : string
            a string denoting which pvalue should be returned. Refer to the
            the Moran statistic's documentation for available p-values
        outvals : list of strings
            list of arbitrary attributes to return as columns from the
            Moran statistic
        **stat_kws : keyword arguments
            options to pass to the underlying statistic. For this, see the
            documentation for the Moran statistic.

        Returns
        --------
        If inplace, None, and operation is conducted on dataframe in memory. Otherwise,
        returns a copy of the dataframe with the relevant columns attached.

        See Also
        ---------
        For further documentation, refer to the Moran class in pysal.esda
        """
        return _univariate_handler(df, cols, w=w, inplace=inplace, pvalue=pvalue,
                                   outvals=outvals, stat=cls,
                                   swapname=cls.__name__.lower(), **stat_kws)
class Moran_BV(object):
    """
    Bivariate Moran's I

    Parameters
    ----------
    x : array
        x-axis variable
    y : array
        wy will be on y axis
    w : W
        weight instance assumed to be aligned with y
    transformation : {'R', 'B', 'D', 'U', 'V'}
        weights transformation, default is row-standardized "r".
        Other options include
        "B": binary,
        "D": doubly-standardized,
        "U": untransformed (general weights),
        "V": variance-stabilizing.
    permutations : int
        number of random permutations for calculation of pseudo
        p_values

    Attributes
    ----------
    zx : array
        original x variable standardized by mean and std
    zy : array
        original y variable standardized by mean and std
    w : W
        original w object
    permutation : int
        number of permutations
    I : float
        value of bivariate Moran's I
    sim : array
        (if permutations>0)
        vector of I values for permuted samples
    p_sim : float
        (if permutations>0)
        p-value based on permutations (one-sided)
        null: spatial randomness
        alternative: the observed I is extreme
        it is either extremely high or extremely low
    EI_sim : array
        (if permutations>0)
        average value of I from permutations
    VI_sim : array
        (if permutations>0)
        variance of I from permutations
    seI_sim : array
        (if permutations>0)
        standard deviation of I under permutations.
    z_sim : array
        (if permutations>0)
        standardized I based on permutations
    p_z_sim : float
        (if permutations>0)
        p-value based on standard normal approximation from
        permutations

    Notes
    -----
    Inference is only based on permutations as analytical results are not too
    reliable.

    Examples
    --------
    >>> import libpysal
    >>> import numpy as np

    Set random number generator seed so we can replicate the example

    >>> np.random.seed(10)

    Open the sudden infant death dbf file and read in rates for 74 and 79
    converting each to a numpy array

    >>> f = libpysal.io.open(libpysal.examples.get_path("sids2.dbf"))
    >>> SIDR74 = np.array(f.by_col['SIDR74'])
    >>> SIDR79 = np.array(f.by_col['SIDR79'])

    Read a GAL file and construct our spatial weights object

    >>> w = libpysal.io.open(libpysal.examples.get_path("sids2.gal")).read()

    Create an instance of Moran_BV

    >>> from esda.moran import Moran_BV
    >>> mbi = Moran_BV(SIDR79, SIDR74, w)

    What is the bivariate Moran's I value

    >>> round(mbi.I, 3)
    0.156

    Based on 999 permutations, what is the p-value of our statistic

    >>> round(mbi.p_z_sim, 3)
    0.001
    """

    def __init__(self, x, y, w, transformation="r", permutations=PERMUTATIONS):
        x = np.asarray(x).flatten()
        y = np.asarray(y).flatten()
        # standardize both variables using the sample std (ddof=1)
        zy = (y - y.mean()) / y.std(ddof=1)
        zx = (x - x.mean()) / x.std(ddof=1)
        self.y = y
        self.x = x
        self.zx = zx
        self.zy = zy
        n = x.shape[0]
        self.den = n - 1.  # zx'zx = zy'zy = n-1
        w.transform = transformation
        self.w = w
        self.I = self.__calc(zy)
        if permutations:
            # pseudo inference: permute only zy, keep zx fixed
            nrp = np.random.permutation
            sim = [self.__calc(nrp(zy)) for i in range(permutations)]
            self.sim = sim = np.array(sim)
            above = sim >= self.I
            larger = above.sum()
            # fold to the smaller tail so the pseudo p is one-sided
            if (permutations - larger) < larger:
                larger = permutations - larger
            self.p_sim = (larger + 1.) / (permutations + 1.)
            self.EI_sim = sim.sum() / permutations
            self.seI_sim = np.array(sim).std()
            self.VI_sim = self.seI_sim ** 2
            self.z_sim = (self.I - self.EI_sim) / self.seI_sim
            if self.z_sim > 0:
                self.p_z_sim = 1 - stats.norm.cdf(self.z_sim)
            else:
                self.p_z_sim = stats.norm.cdf(self.z_sim)

    def __calc(self, zy):
        # bivariate statistic: zx' W zy / (n - 1)
        wzy = slag(self.w, zy)
        self.num = (self.zx * wzy).sum()
        return self.num / self.den

    @property
    def _statistic(self):
        """More consistent hidden attribute to access ESDA statistics"""
        return self.I

    @classmethod
    def by_col(cls, df, x, y=None, w=None, inplace=False, pvalue='sim', outvals=None, **stat_kws):
        """
        Function to compute a Moran_BV statistic on a dataframe

        Arguments
        ---------
        df : pandas.DataFrame
            a pandas dataframe with a geometry column
        X : list of strings
            column name or list of column names to use as X values to compute
            the bivariate statistic. If no Y is provided, pairwise comparisons
            among these variates are used instead.
        Y : list of strings
            column name or list of column names to use as Y values to compute
            the bivariate statistic. if no Y is provided, pariwise comparisons
            among the X variates are used instead.
        w : pysal weights object
            a weights object aligned with the dataframe. If not provided, this
            is searched for in the dataframe's metadata
        inplace : bool
            a boolean denoting whether to operate on the dataframe inplace or to
            return a series contaning the results of the computation. If
            operating inplace, the derived columns will be named
            'column_moran_local'
        pvalue : string
            a string denoting which pvalue should be returned. Refer to the
            the Moran_BV statistic's documentation for available p-values
        outvals : list of strings
            list of arbitrary attributes to return as columns from the
            Moran_BV statistic
        **stat_kws : keyword arguments
            options to pass to the underlying statistic. For this, see the
            documentation for the Moran_BV statistic.

        Returns
        --------
        If inplace, None, and operation is conducted on dataframe in memory. Otherwise,
        returns a copy of the dataframe with the relevant columns attached.

        See Also
        ---------
        For further documentation, refer to the Moran_BV class in pysal.esda
        """
        return _bivariate_handler(df, x, y=y, w=w, inplace=inplace,
                                  pvalue=pvalue, outvals=outvals,
                                  swapname=cls.__name__.lower(), stat=cls, **stat_kws)
def Moran_BV_matrix(variables, w, permutations=0, varnames=None):
    """
    Bivariate Moran Matrix

    Compute the bivariate Moran statistic for every ordered pair drawn
    from a set of variables.

    Parameters
    ----------
    variables : array or pandas.DataFrame
        sequence of variables to be assessed
    w : W
        a spatial weights object
    permutations : int
        number of permutations
    varnames : list, optional if variables is an array
        Strings for variable names. Will add an
        attribute to `Moran_BV` objects in results needed for plotting
        in `splot` or `.plot()`. Default =None.
        Note: If variables is a `pandas.DataFrame` varnames
        will automatically be generated

    Returns
    -------
    results : dictionary
        (i, j) is the key for the pair of variables, values are
        the Moran_BV objects.

    Examples
    --------

    open dbf

    >>> import libpysal
    >>> f = libpysal.io.open(libpysal.examples.get_path("sids2.dbf"))

    pull of selected variables from dbf and create numpy arrays for each

    >>> varnames = ['SIDR74', 'SIDR79', 'NWR74', 'NWR79']
    >>> vars = [np.array(f.by_col[var]) for var in varnames]

    create a contiguity matrix from an external gal file

    >>> w = libpysal.io.open(libpysal.examples.get_path("sids2.gal")).read()

    create an instance of Moran_BV_matrix

    >>> from esda.moran import Moran_BV_matrix
    >>> res = Moran_BV_matrix(vars, w, varnames = varnames)

    check values

    >>> round(res[(0, 1)].I,7)
    0.1936261
    >>> round(res[(3, 0)].I,7)
    0.3770138
    """
    # pandas is an optional dependency here: only needed to unpack a DataFrame
    try:
        import pandas
    except ImportError:
        pandas = None
    if pandas is not None and isinstance(variables, pandas.DataFrame):
        # use column labels as variable names and extract each column
        varnames = pandas.Index.tolist(variables.columns)
        variables_n = [variables[str(name)].values for name in varnames]
    else:
        variables_n = variables
    return _Moran_BV_Matrix_array(variables=variables_n, w=w,
                                  permutations=permutations,
                                  varnames=varnames)
def _Moran_BV_Matrix_array(variables, w, permutations=0, varnames=None):
    """
    Base calculation for Moran_BV_matrix.

    Computes Moran_BV for every ordered pair (i, j), i != j, of the input
    variables, tagging each result with the pair's variable names.
    """
    k = len(variables)
    # BUG FIX: k must be known before generating default names; previously
    # the comprehension referenced k before assignment, raising NameError
    # whenever varnames was omitted.
    if varnames is None:
        varnames = ['x{}'.format(i) for i in range(k)]
    rk = list(range(0, k - 1))
    results = {}
    for i in rk:
        for j in range(i + 1, k):
            y1 = variables[i]
            y2 = variables[j]
            # both orderings: statistic is not symmetric in x and y
            results[i, j] = Moran_BV(y1, y2, w, permutations=permutations)
            results[j, i] = Moran_BV(y2, y1, w, permutations=permutations)
            results[i, j].varnames = {'x': varnames[i], 'y': varnames[j]}
            results[j, i].varnames = {'x': varnames[j], 'y': varnames[i]}
    return results
class Moran_Rate(Moran):
    """
    Adjusted Moran's I Global Autocorrelation Statistic for Rate
    Variables :cite:`Assun_o_1999`

    Parameters
    ----------
    e : array
        an event variable measured across n spatial units
    b : array
        a population-at-risk variable measured across n spatial
        units
    w : W
        spatial weights instance
    adjusted : boolean
        whether or not Moran's I needs to be adjusted for rate
        variable
    transformation : {'R', 'B', 'D', 'U', 'V'}
        weights transformation, default is row-standardized "r".
        Other options include
        "B": binary,
        "D": doubly-standardized,
        "U": untransformed (general weights),
        "V": variance-stabilizing.
    two_tailed : boolean
        If True (default), analytical p-values for Moran's I are
        two-tailed, otherwise they are one tailed.
    permutations : int
        number of random permutations for calculation of pseudo
        p_values

    Attributes
    ----------
    y : array
        rate variable computed from parameters e and b
        if adjusted is True, y is standardized rates
        otherwise, y is raw rates
    w : W
        original w object
    permutations : int
        number of permutations
    I : float
        value of Moran's I
    EI : float
        expected value under normality assumption
    VI_norm : float
        variance of I under normality assumption
    seI_norm : float
        standard deviation of I under normality assumption
    z_norm : float
        z-value of I under normality assumption
    p_norm : float
        p-value of I under normality assumption
    VI_rand : float
        variance of I under randomization assumption
    seI_rand : float
        standard deviation of I under randomization assumption
    z_rand : float
        z-value of I under randomization assumption
    p_rand : float
        p-value of I under randomization assumption
    two_tailed : boolean
        If True, p_norm and p_rand are two-tailed p-values,
        otherwise they are one-tailed.
    sim : array
        (if permutations>0)
        vector of I values for permuted samples
    p_sim : array
        (if permutations>0)
        p-value based on permutations (one-sided)
        null: spatial randomness
        alternative: the observed I is extreme if it is
        either extremely greater or extremely lower than the values
        obtained from permutaitons
    EI_sim : float
        (if permutations>0)
        average value of I from permutations
    VI_sim : float
        (if permutations>0)
        variance of I from permutations
    seI_sim : float
        (if permutations>0)
        standard deviation of I under permutations.
    z_sim : float
        (if permutations>0)
        standardized I based on permutations
    p_z_sim : float
        (if permutations>0)
        p-value based on standard normal approximation from

    Examples
    --------
    >>> import libpysal
    >>> w = libpysal.io.open(libpysal.examples.get_path("sids2.gal")).read()
    >>> f = libpysal.io.open(libpysal.examples.get_path("sids2.dbf"))
    >>> e = np.array(f.by_col('SID79'))
    >>> b = np.array(f.by_col('BIR79'))
    >>> from esda.moran import Moran_Rate
    >>> mi = Moran_Rate(e, b, w, two_tailed=False)
    >>> "%6.4f" % mi.I
    '0.1662'
    >>> "%6.4f" % mi.p_norm
    '0.0042'
    """

    def __init__(self, e, b, w, adjusted=True, transformation="r",
                 permutations=PERMUTATIONS, two_tailed=True):
        e = np.asarray(e).flatten()
        b = np.asarray(b).flatten()
        # either EB-standardized rates or raw rates e/b
        if adjusted:
            y = assuncao_rate(e, b)
        else:
            y = e * 1.0 / b
        Moran.__init__(self, y, w, transformation=transformation,
                       permutations=permutations, two_tailed=two_tailed)

    @classmethod
    def by_col(cls, df, events, populations, w=None, inplace=False,
               pvalue='sim', outvals=None, swapname='', **stat_kws):
        """
        Function to compute a Moran_Rate statistic on a dataframe

        Arguments
        ---------
        df : pandas.DataFrame
            a pandas dataframe with a geometry column
        events : string or list of strings
            one or more names where events are stored
        populations : string or list of strings
            one or more names where the populations corresponding to the
            events are stored. If one population column is provided, it is
            used for all event columns. If more than one population column
            is provided but there is not a population for every event
            column, an exception will be raised.
        w : pysal weights object
            a weights object aligned with the dataframe. If not provided, this
            is searched for in the dataframe's metadata
        inplace : bool
            a boolean denoting whether to operate on the dataframe inplace or to
            return a series contaning the results of the computation. If
            operating inplace, the derived columns will be named
            'column_moran_rate'
        pvalue : string
            a string denoting which pvalue should be returned. Refer to the
            the Moran_Rate statistic's documentation for available p-values
        outvals : list of strings
            list of arbitrary attributes to return as columns from the
            Moran_Rate statistic
        **stat_kws : keyword arguments
            options to pass to the underlying statistic. For this, see the
            documentation for the Moran_Rate statistic.

        Returns
        --------
        If inplace, None, and operation is conducted on dataframe in memory. Otherwise,
        returns a copy of the dataframe with the relevant columns attached.

        See Also
        ---------
        For further documentation, refer to the Moran_Rate class in pysal.esda
        """
        if not inplace:
            # operate on a copy, mutate it in place, and hand it back
            new = df.copy()
            cls.by_col(new, events, populations, w=w, inplace=True,
                       pvalue=pvalue, outvals=outvals, swapname=swapname,
                       **stat_kws)
            return new
        if isinstance(events, str):
            events = [events]
        if isinstance(populations, str):
            populations = [populations]
        # recycle a single population column across all event columns
        if len(populations) < len(events):
            populations = populations * len(events)
        if len(events) != len(populations):
            raise ValueError('There is not a one-to-one matching between events and '
                             'populations!\nEvents: {}\n\nPopulations:'
                             ' {}'.format(events, populations))
        adjusted = stat_kws.pop('adjusted', True)
        if isinstance(adjusted, bool):
            adjusted = [adjusted] * len(events)
        # BUG FIX: compare by value; `swapname is ''` relied on string
        # interning and raises SyntaxWarning on modern CPython
        if swapname == '':
            swapname = cls.__name__.lower()

        rates = [assuncao_rate(df[e], df[pop]) if adj
                 else df[e].astype(float) / df[pop]
                 for e, pop, adj in zip(events, populations, adjusted)]
        names = ['-'.join((e, p)) for e, p in zip(events, populations)]
        # Build the rate frame via the type of the incoming frame to avoid
        # importing pandas here. BUG FIX: DataFrame.from_items was removed
        # from pandas; a dict preserves insertion order on Python 3.7+.
        rate_df = type(df)(dict(zip(names, rates)))
        stat_df = _univariate_handler(rate_df, names, w=w, inplace=False,
                                      pvalue=pvalue, outvals=outvals,
                                      swapname=swapname,
                                      stat=Moran,  # how would this get done w/super?
                                      **stat_kws)
        for col in stat_df.columns:
            df[col] = stat_df[col]
class Moran_Local(object):
    """Local Moran Statistics

    Parameters
    ----------
    y : array
        (n,1), attribute array
    w : W
        weight instance assumed to be aligned with y
    transformation : {'R', 'B', 'D', 'U', 'V'}
        weights transformation, default is row-standardized "r".
        Other options include
        "B": binary,
        "D": doubly-standardized,
        "U": untransformed (general weights),
        "V": variance-stabilizing.
    permutations : int
        number of random permutations for calculation of pseudo
        p_values
    geoda_quads : boolean
        (default=False)
        If True use GeoDa scheme: HH=1, LL=2, LH=3, HL=4
        If False use PySAL Scheme: HH=1, LH=2, LL=3, HL=4

    Attributes
    ----------
    y : array
        original variable
    w : W
        original w object
    permutations : int
        number of random permutations for calculation of pseudo
        p_values
    Is : array
        local Moran's I values
    q : array
        (if permutations>0)
        values indicate quadrant location 1 HH, 2 LH, 3 LL, 4 HL
    sim : array (permutations by n)
        (if permutations>0)
        I values for permuted samples
    p_sim : array
        (if permutations>0)
        p-values based on permutations (one-sided)
        null: spatial randomness
        alternative: the observed Ii is further away or extreme
        from the median of simulated values. It is either extremely
        high or extremely low in the distribution of simulated Is.
    EI_sim : array
        (if permutations>0)
        average values of local Is from permutations
    VI_sim : array
        (if permutations>0)
        variance of Is from permutations
    seI_sim : array
        (if permutations>0)
        standard deviations of Is under permutations.
    z_sim : array
        (if permutations>0)
        standardized Is based on permutations
    p_z_sim : array
        (if permutations>0)
        p-values based on standard normal approximation from
        permutations (one-sided)
        for two-sided tests, these values should be multiplied by 2

    Notes
    -----
    For technical details see :cite:`Anselin95`.

    Examples
    --------
    >>> import libpysal
    >>> import numpy as np
    >>> np.random.seed(10)
    >>> w = libpysal.io.open(libpysal.examples.get_path("desmith.gal")).read()
    >>> f = libpysal.io.open(libpysal.examples.get_path("desmith.txt"))
    >>> y = np.array(f.by_col['z'])
    >>> from esda.moran import Moran_Local
    >>> lm = Moran_Local(y, w, transformation = "r", permutations = 99)
    >>> lm.q
    array([4, 4, 4, 2, 3, 3, 1, 4, 3, 3])
    >>> lm.p_z_sim[0]
    0.24669152541631179
    >>> lm = Moran_Local(y, w, transformation = "r", permutations = 99, \
            geoda_quads=True)
    >>> lm.q
    array([4, 4, 4, 3, 2, 2, 1, 4, 2, 2])

    Note random components result is slightly different values across
    architectures so the results have been removed from doctests and will be
    moved into unittests that are conditional on architectures
    """

    def __init__(self, y, w, transformation="r", permutations=PERMUTATIONS,
                 geoda_quads=False):
        y = np.asarray(y).flatten()
        self.y = y
        n = len(y)
        self.n = n
        self.n_1 = n - 1
        z = y - y.mean()
        # setting for floating point noise
        orig_settings = np.seterr()
        np.seterr(all="ignore")
        sy = y.std()
        z /= sy
        np.seterr(**orig_settings)
        self.z = z
        w.transform = transformation
        self.w = w
        self.permutations = permutations
        self.den = (z * z).sum()
        self.Is = self.calc(self.w, self.z)
        self.geoda_quads = geoda_quads
        quads = [1, 2, 3, 4]
        if geoda_quads:
            quads = [1, 3, 2, 4]
        self.quads = quads
        self.__quads()
        if permutations:
            self.__crand()
            sim = np.transpose(self.rlisas)
            above = sim >= self.Is
            larger = above.sum(0)
            # fold to the smaller tail so the pseudo p is one-sided
            low_extreme = (self.permutations - larger) < larger
            larger[low_extreme] = self.permutations - larger[low_extreme]
            self.p_sim = (larger + 1.0) / (permutations + 1.0)
            self.sim = sim
            self.EI_sim = sim.mean(axis=0)
            self.seI_sim = sim.std(axis=0)
            self.VI_sim = self.seI_sim * self.seI_sim
            self.z_sim = (self.Is - self.EI_sim) / self.seI_sim
            self.p_z_sim = 1 - stats.norm.cdf(np.abs(self.z_sim))

    def calc(self, w, z):
        # local statistic for each i: (n-1) * z_i * (Wz)_i / z'z
        zl = slag(w, z)
        return self.n_1 * self.z * zl / self.den

    def __crand(self):
        """
        conditional randomization

        for observation i with ni neighbors,  the candidate set cannot include
        i (we don't want i being a neighbor of i). we have to sample without
        replacement from a set of ids that doesn't include i. numpy doesn't
        directly support sampling wo replacement and it is expensive to
        implement this. instead we omit i from the original ids,  permute the
        ids and take the first ni elements of the permuted ids as the
        neighbors to i in each randomization.
        """
        z = self.z
        lisas = np.zeros((self.n, self.permutations))
        n_1 = self.n - 1
        prange = list(range(self.permutations))
        k = self.w.max_neighbors + 1
        nn = self.n - 1
        rids = np.array([np.random.permutation(nn)[0:k] for i in prange])
        ids = np.arange(self.w.n)
        ido = self.w.id_order
        w = [self.w.weights[ido[i]] for i in ids]
        wc = [self.w.cardinalities[ido[i]] for i in ids]
        for i in range(self.w.n):
            idsi = ids[ids != i]
            np.random.shuffle(idsi)
            tmp = z[idsi[rids[:, 0:wc[i]]]]
            lisas[i] = z[i] * (w[i] * tmp).sum(1)
        self.rlisas = (n_1 / self.den) * lisas

    def __quads(self):
        # classify each observation into a Moran scatterplot quadrant
        zl = slag(self.w, self.z)
        zp = self.z > 0
        lp = zl > 0
        # FIX: renamed locals (pp/np/nn/pn) -- the old name `np` shadowed
        # the module-level numpy alias inside this method
        hh = zp * lp              # high value, high neighbors
        lh = (1 - zp) * lp        # low value, high neighbors
        ll = (1 - zp) * (1 - lp)  # low value, low neighbors
        hl = zp * (1 - lp)        # high value, low neighbors
        self.q = self.quads[0] * hh + self.quads[1] * lh + self.quads[2] * ll \
            + self.quads[3] * hl

    @property
    def _statistic(self):
        """More consistent hidden attribute to access ESDA statistics"""
        return self.Is

    @classmethod
    def by_col(cls, df, cols, w=None, inplace=False, pvalue='sim', outvals=None, **stat_kws):
        """
        Function to compute a Moran_Local statistic on a dataframe

        Arguments
        ---------
        df : pandas.DataFrame
            a pandas dataframe with a geometry column
        cols : string or list of string
            name or list of names of columns to use to compute the statistic
        w : pysal weights object
            a weights object aligned with the dataframe. If not provided, this
            is searched for in the dataframe's metadata
        inplace : bool
            a boolean denoting whether to operate on the dataframe inplace or to
            return a series contaning the results of the computation. If
            operating inplace, the derived columns will be named
            'column_moran_local'
        pvalue : string
            a string denoting which pvalue should be returned. Refer to the
            the Moran_Local statistic's documentation for available p-values
        outvals : list of strings
            list of arbitrary attributes to return as columns from the
            Moran_Local statistic
        **stat_kws : keyword arguments
            options to pass to the underlying statistic. For this, see the
            documentation for the Moran_Local statistic.

        Returns
        --------
        If inplace, None, and operation is conducted on dataframe in memory. Otherwise,
        returns a copy of the dataframe with the relevant columns attached.

        See Also
        ---------
        For further documentation, refer to the Moran_Local class in pysal.esda
        """
        return _univariate_handler(df, cols, w=w, inplace=inplace, pvalue=pvalue,
                                   outvals=outvals, stat=cls,
                                   swapname=cls.__name__.lower(), **stat_kws)
class Moran_Local_BV(object):
"""Bivariate Local Moran Statistics
Parameters
----------
x : array
x-axis variable
y : array
(n,1), wy will be on y axis
w : W
weight instance assumed to be aligned with y
transformation : {'R', 'B', 'D', 'U', 'V'}
weights transformation, default is row-standardized "r".
Other options include
"B": binary,
"D": doubly-standardized,
"U": untransformed (general weights),
"V": variance-stabilizing.
permutations : int
number of random permutations for calculation of pseudo
p_values
geoda_quads : boolean
(default=False)
If True use GeoDa scheme: HH=1, LL=2, LH=3, HL=4
If False use PySAL Scheme: HH=1, LH=2, LL=3, HL=4
Attributes
----------
zx : array
original x variable standardized by mean and std
zy : array
original y variable standardized by mean and std
w : W
original w object
permutations : int
number of random permutations for calculation of pseudo
p_values
Is : float
value of Moran's I
q : array
(if permutations>0)
values indicate quandrant location 1 HH, 2 LH, 3 LL, 4 HL
sim : array
(if permutations>0)
vector of I values for permuted samples
p_sim : array
(if permutations>0)
p-value based on permutations (one-sided)
null: spatial randomness
alternative: the observed Ii is further away or extreme
from the median of simulated values. It is either extremelyi
high or extremely low in the distribution of simulated Is.
EI_sim : array
(if permutations>0)
average values of local Is from permutations
VI_sim : array
(if permutations>0)
variance of Is from permutations
seI_sim : array
(if permutations>0)
standard deviations of Is under permutations.
z_sim : arrray
(if permutations>0)
standardized Is based on permutations
p_z_sim : array
(if permutations>0)
p-values based on standard normal approximation from
permutations (one-sided)
for two-sided tests, these values should be multiplied by 2
Examples
--------
>>> import libpysal
>>> import numpy as np
>>> np.random.seed(10)
>>> w = libpysal.io.open(libpysal.examples.get_path("sids2.gal")).read()
>>> f = libpysal.io.open(libpysal.examples.get_path("sids2.dbf"))
>>> x = np.array(f.by_col['SIDR79'])
>>> y = np.array(f.by_col['SIDR74'])
>>> from esda.moran import Moran_Local_BV
>>> lm =Moran_Local_BV(x, y, w, transformation = "r", \
permutations = 99)
>>> lm.q[:10]
array([3, 4, 3, 4, 2, 1, 4, 4, 2, 4])
>>> lm = Moran_Local_BV(x, y, w, transformation = "r", \
permutations = 99, geoda_quads=True)
>>> lm.q[:10]
array([2, 4, 2, 4, 3, 1, 4, 4, 3, 4])
Note random components result is slightly different values across
architectures so the results have been removed from doctests and will be
moved into unittests that are conditional on architectures
"""
def __init__(self, x, y, w, transformation="r", permutations=PERMUTATIONS,
             geoda_quads=False):
    """Compute local bivariate Moran statistics relating ``x`` to the
    spatial lag of ``y``.

    See the class docstring for parameter and attribute descriptions.
    """
    x = np.asarray(x).flatten()
    y = np.asarray(y).flatten()
    self.y = y
    self.x = x
    n = len(y)
    self.n = n
    self.n_1 = n - 1
    # center both variables before standardizing
    zx = x - x.mean()
    zy = y - y.mean()
    # silence floating point warnings (e.g. divide-by-zero from a
    # constant variable) during standardization, then restore settings
    orig_settings = np.seterr()
    np.seterr(all="ignore")
    sx = x.std()
    zx /= sx
    sy = y.std()
    zy /= sy
    np.seterr(**orig_settings)
    self.zx = zx
    self.zy = zy
    w.transform = transformation
    self.w = w
    self.permutations = permutations
    # denominator: sum of squared standardized x values
    self.den = (zx * zx).sum()
    self.Is = self.calc(self.w, self.zx, self.zy)
    self.geoda_quads = geoda_quads
    # quadrant labelling scheme: PySAL (HH=1, LH=2, LL=3, HL=4) vs
    # GeoDa (HH=1, LL=2, LH=3, HL=4)
    quads = [1, 2, 3, 4]
    if geoda_quads:
        quads = [1, 3, 2, 4]
    self.quads = quads
    self.__quads()
    if permutations:
        # conditional permutation inference: simulated local statistics
        # land in self.rlisas (permutations x n)
        self.__crand()
        sim = np.transpose(self.rlisas)
        above = sim >= self.Is
        larger = above.sum(0)
        # two-sided folding: count whichever tail is smaller
        low_extreme = (self.permutations - larger) < larger
        larger[low_extreme] = self.permutations - larger[low_extreme]
        self.p_sim = (larger + 1.0) / (permutations + 1.0)
        self.sim = sim
        self.EI_sim = sim.mean(axis=0)
        self.seI_sim = sim.std(axis=0)
        self.VI_sim = self.seI_sim * self.seI_sim
        self.z_sim = (self.Is - self.EI_sim) / self.seI_sim
        # one-sided p-value from the normal approximation
        self.p_z_sim = 1 - stats.norm.cdf(np.abs(self.z_sim))
def calc(self, w, zx, zy):
    """Return the vector of local bivariate statistics.

    NOTE(review): the ``zx`` argument is not used; the method reads
    ``self.zx`` instead — confirm against callers before relying on
    passing a different ``zx``.
    """
    lagged_zy = slag(w, zy)
    return self.n_1 * self.zx * lagged_zy / self.den
def __crand(self):
    """
    Conditional randomization.

    For observation i with ni neighbors, the candidate set cannot include
    i (we don't want i being a neighbor of i). We have to sample without
    replacement from a set of ids that doesn't include i. numpy doesn't
    directly support sampling w/o replacement and it is expensive to
    implement this. Instead we omit i from the original ids, permute the
    ids and take the first ni elements of the permuted ids as the
    neighbors to i in each randomization.

    Results are stored in ``self.rlisas`` with shape (n, permutations).
    """
    lisas = np.zeros((self.n, self.permutations))
    n_1 = self.n - 1
    prange = list(range(self.permutations))
    # k is the largest neighbor count plus one: enough candidate ids
    # for any observation
    k = self.w.max_neighbors + 1
    nn = self.n - 1
    # one row of candidate neighbor indices per permutation
    rids = np.array([np.random.permutation(nn)[0:k] for i in prange])
    ids = np.arange(self.w.n)
    ido = self.w.id_order
    w = [self.w.weights[ido[i]] for i in ids]
    wc = [self.w.cardinalities[ido[i]] for i in ids]
    zx = self.zx
    zy = self.zy
    for i in range(self.w.n):
        # candidate pool excludes observation i itself
        idsi = ids[ids != i]
        np.random.shuffle(idsi)
        # pick wc[i] pseudo-neighbors per permutation and lag zy over them
        tmp = zy[idsi[rids[:, 0:wc[i]]]]
        lisas[i] = zx[i] * (w[i] * tmp).sum(1)
    self.rlisas = (n_1 / self.den) * lisas
def __quads(self):
    """Assign each observation to a Moran scatterplot quadrant.

    Sets ``self.q`` using the labelling scheme held in ``self.quads``
    (PySAL order HH, LH, LL, HL; GeoDa order remaps LH/LL).
    """
    zl = slag(self.w, self.zy)
    zp = self.zx > 0
    lp = zl > 0
    # Renamed locals: the original bound a local named ``np``, shadowing
    # the module-level numpy alias within this method.
    hh = zp * lp
    lh = (1 - zp) * lp
    ll = (1 - zp) * (1 - lp)
    hl = zp * (1 - lp)
    self.q = self.quads[0] * hh + self.quads[1] * lh + self.quads[2] * ll \
        + self.quads[3] * hl
@property
def _statistic(self):
    """Uniformly named alias for the ESDA statistic values (``Is``)."""
    values = self.Is
    return values
@classmethod
def by_col(cls, df, x, y=None, w=None, inplace=False, pvalue='sim', outvals=None, **stat_kws):
    """Compute a Moran_Local_BV statistic over dataframe columns.

    Arguments
    ---------
    df          : pandas.DataFrame with a geometry column.
    x           : column name or list of names used as X values. When no
                  ``y`` is given, pairwise comparisons among these
                  variates are used instead.
    y           : column name or list of names used as Y values; optional
                  (see ``x``).
    w           : pysal weights object aligned with the dataframe; when
                  omitted it is looked up in the dataframe's metadata.
    inplace     : when True, operate on ``df`` in place, attaching
                  columns named 'column_moran_local_bv'; otherwise return
                  a copy with the derived columns.
    pvalue      : which p-value to attach (see Moran_Local_BV docs).
    outvals     : extra statistic attributes to attach as columns.
    **stat_kws  : forwarded to the underlying Moran_Local_BV statistic.

    Returns
    -------
    None when ``inplace`` is True; otherwise a copy of ``df`` with the
    relevant columns attached.

    See Also
    --------
    Moran_Local_BV
    """
    derived_name = cls.__name__.lower()
    return _bivariate_handler(df, x, y=y, w=w, inplace=inplace,
                              pvalue=pvalue, outvals=outvals,
                              swapname=derived_name, stat=cls,
                              **stat_kws)
class Moran_Local_Rate(Moran_Local):
    """
    Adjusted Local Moran Statistics for Rate Variables [Assuncao1999]_

    Parameters
    ----------
    e : array
        (n,1), an event variable across n spatial units
    b : array
        (n,1), a population-at-risk variable across n spatial units
    w : W
        weight instance assumed to be aligned with y
    adjusted : boolean
        whether or not local Moran statistics need to be adjusted for
        rate variable
    transformation : {'R', 'B', 'D', 'U', 'V'}
        weights transformation, default is row-standardized "r".
        Other options include
        "B": binary,
        "D": doubly-standardized,
        "U": untransformed (general weights),
        "V": variance-stabilizing.
    permutations : int
        number of random permutations for calculation of pseudo
        p_values
    geoda_quads : boolean
        (default=False)
        If True use GeoDa scheme: HH=1, LL=2, LH=3, HL=4
        If False use PySAL Scheme: HH=1, LH=2, LL=3, HL=4

    Attributes
    ----------
    y : array
        rate variables computed from parameters e and b
        if adjusted is True, y is standardized rates
        otherwise, y is raw rates
    w : W
        original w object
    permutations : int
        number of random permutations for calculation of pseudo
        p_values
    I : float
        value of Moran's I
    q : array
        (if permutations>0)
        values indicate quadrant location 1 HH, 2 LH, 3 LL, 4 HL
    sim : array
        (if permutations>0)
        vector of I values for permuted samples
    p_sim : array
        (if permutations>0)
        p-value based on permutations (one-sided)
        null: spatial randomness
        alternative: the observed Ii is further away or extreme
        from the median of simulated Iis. It is either extremely
        high or extremely low in the distribution of simulated Is
    EI_sim : float
        (if permutations>0)
        average value of I from permutations
    VI_sim : float
        (if permutations>0)
        variance of I from permutations
    seI_sim : float
        (if permutations>0)
        standard deviation of I under permutations.
    z_sim : float
        (if permutations>0)
        standardized I based on permutations
    p_z_sim : float
        (if permutations>0)
        p-value based on standard normal approximation from
        permutations (one-sided)
        for two-sided tests, these values should be multiplied by 2

    Examples
    --------
    >>> import libpysal
    >>> import numpy as np
    >>> np.random.seed(10)
    >>> w = libpysal.io.open(libpysal.examples.get_path("sids2.gal")).read()
    >>> f = libpysal.io.open(libpysal.examples.get_path("sids2.dbf"))
    >>> e = np.array(f.by_col('SID79'))
    >>> b = np.array(f.by_col('BIR79'))
    >>> from esda.moran import Moran_Local_Rate
    >>> lm = Moran_Local_Rate(e, b, w, transformation = "r", permutations = 99)
    >>> lm.q[:10]
    array([2, 4, 3, 1, 2, 1, 1, 4, 2, 4])
    >>> lm = Moran_Local_Rate(e, b, w, transformation = "r", permutations = 99, geoda_quads=True)
    >>> lm.q[:10]
    array([3, 4, 2, 1, 3, 1, 1, 4, 3, 4])

    Note random components result is slightly different values across
    architectures so the results have been removed from doctests and will be
    moved into unittests that are conditional on architectures
    """

    def __init__(self, e, b, w, adjusted=True, transformation="r",
                 permutations=PERMUTATIONS, geoda_quads=False):
        e = np.asarray(e).flatten()
        b = np.asarray(b).flatten()
        # either Assuncao-adjusted (empirical Bayes standardized) rates
        # or raw rates e/b
        if adjusted:
            y = assuncao_rate(e, b)
        else:
            y = e * 1.0 / b
        Moran_Local.__init__(self, y, w,
                             transformation=transformation,
                             permutations=permutations,
                             geoda_quads=geoda_quads)

    @classmethod
    def by_col(cls, df, events, populations, w=None, inplace=False,
               pvalue='sim', outvals=None, swapname='', **stat_kws):
        """
        Function to compute a Moran_Local_Rate statistic on a dataframe

        Arguments
        ---------
        df          :   pandas.DataFrame
                        a pandas dataframe with a geometry column
        events      :   string or list of strings
                        one or more names where events are stored
        populations :   string or list of strings
                        one or more names where the populations corresponding
                        to the events are stored. If one population column is
                        provided, it is used for all event columns. If more
                        than one population column is provided but there is
                        not a population for every event column, an exception
                        will be raised.
        w           :   pysal weights object
                        a weights object aligned with the dataframe. If not
                        provided, this is searched for in the dataframe's
                        metadata
        inplace     :   bool
                        a boolean denoting whether to operate on the dataframe
                        inplace or to return a series containing the results
                        of the computation. If operating inplace, the derived
                        columns will be named 'column_moran_local_rate'
        pvalue      :   string
                        a string denoting which pvalue should be returned.
                        Refer to the Moran_Local_Rate statistic's
                        documentation for available p-values
        outvals     :   list of strings
                        list of arbitrary attributes to return as columns
                        from the Moran_Local_Rate statistic
        **stat_kws  :   keyword arguments
                        options to pass to the underlying statistic. For this,
                        see the documentation for the Moran_Local_Rate
                        statistic.

        Returns
        --------
        If inplace, None, and operation is conducted on dataframe in memory.
        Otherwise, returns a copy of the dataframe with the relevant columns
        attached.

        See Also
        ---------
        For further documentation, refer to the Moran_Local_Rate class in
        pysal.esda
        """
        if not inplace:
            # work on a copy, mutate it in place, and hand it back
            new = df.copy()
            cls.by_col(new, events, populations, w=w, inplace=True,
                       pvalue=pvalue, outvals=outvals, swapname=swapname,
                       **stat_kws)
            return new
        if isinstance(events, str):
            events = [events]
        if isinstance(populations, str):
            populations = [populations]
        # broadcast a single population column over all event columns
        if len(populations) < len(events):
            populations = populations * len(events)
        if len(events) != len(populations):
            raise ValueError('There is not a one-to-one matching between events and '
                             'populations!\nEvents: {}\n\nPopulations:'
                             ' {}'.format(events, populations))
        adjusted = stat_kws.pop('adjusted', True)
        if isinstance(adjusted, bool):
            adjusted = [adjusted] * len(events)
        # FIX: was `if swapname is '':` — identity comparison against a
        # string literal is interning-dependent and raises SyntaxWarning
        # on CPython >= 3.8; use equality instead.
        if swapname == '':
            swapname = cls.__name__.lower()
        rates = [assuncao_rate(df[e], df[pop]) if adj
                 else df[e].astype(float) / df[pop]
                 for e, pop, adj in zip(events, populations, adjusted)]
        names = ['-'.join((e, p)) for e, p in zip(events, populations)]
        out_df = df.copy()
        # trick to avoid importing pandas
        # NOTE(review): DataFrame.from_items was removed in pandas 1.0 —
        # confirm the supported pandas versions for this code path.
        rate_df = out_df.from_items(list(zip(names, rates)))
        _univariate_handler(rate_df, names, w=w, inplace=True,
                            pvalue=pvalue, outvals=outvals,
                            swapname=swapname,
                            stat=Moran_Local,  # how would this get done w/super?
                            **stat_kws)
        for col in rate_df.columns:
            df[col] = rate_df[col]
def _Moran_BV_Matrix_array(variables, w, permutations=0, varnames=None):
    """Base calculation for Moran_BV_matrix.

    Computes bivariate Moran statistics for every ordered pair of
    ``variables`` and returns them keyed by ``(i, j)`` index pairs.
    """
    # FIX: k must be computed before the default varnames list is built;
    # the original referenced k prior to its assignment (NameError when
    # varnames is None).
    k = len(variables)
    if varnames is None:
        varnames = ['x{}'.format(i) for i in range(k)]
    rk = list(range(0, k - 1))
    results = {}
    for i in rk:
        for j in range(i + 1, k):
            y1 = variables[i]
            y2 = variables[j]
            # both orderings are computed: (i, j) and (j, i)
            results[i, j] = Moran_BV(y1, y2, w, permutations=permutations)
            results[j, i] = Moran_BV(y2, y1, w, permutations=permutations)
            results[i, j].varnames = {'x': varnames[i], 'y': varnames[j]}
            results[j, i].varnames = {'x': varnames[j], 'y': varnames[i]}
    return results

"""
Moran's I Spatial Autocorrelation Statistics
"""
__author__ = "Sergio J. Rey <srey@asu.edu>, \
Dani Arribas-Bel <daniel.arribas.bel@gmail.com>"
from libpysal.weights.spatial_lag import lag_spatial as slag
from .smoothing import assuncao_rate
from .tabular import _univariate_handler, _bivariate_handler
import scipy.stats as stats
import numpy as np
__all__ = ["Moran", "Moran_Local", "Moran_BV", "Moran_BV_matrix",
"Moran_Local_BV", "Moran_Rate", "Moran_Local_Rate"]
PERMUTATIONS = 999
class Moran(object):
    """Moran's I Global Autocorrelation Statistic

    Parameters
    ----------
    y               : array
                      variable measured across n spatial units
    w               : W
                      spatial weights instance
    transformation  : string
                      weights transformation, default is row-standardized "r".
                      Other options include "B": binary, "D":
                      doubly-standardized, "U": untransformed
                      (general weights), "V": variance-stabilizing.
    permutations    : int
                      number of random permutations for calculation of
                      pseudo-p_values
    two_tailed      : boolean
                      If True (default) analytical p-values for Moran are two
                      tailed, otherwise if False, they are one-tailed.

    Attributes
    ----------
    y            : array
                   original variable
    w            : W
                   original w object
    permutations : int
                   number of permutations
    I            : float
                   value of Moran's I
    EI           : float
                   expected value under normality assumption
    VI_norm      : float
                   variance of I under normality assumption
    seI_norm     : float
                   standard deviation of I under normality assumption
    z_norm       : float
                   z-value of I under normality assumption
    p_norm       : float
                   p-value of I under normality assumption
    VI_rand      : float
                   variance of I under randomization assumption
    seI_rand     : float
                   standard deviation of I under randomization assumption
    z_rand       : float
                   z-value of I under randomization assumption
    p_rand       : float
                   p-value of I under randomization assumption
    two_tailed   : boolean
                   If True p_norm and p_rand are two-tailed, otherwise they
                   are one-tailed.
    sim          : array
                   (if permutations>0)
                   vector of I values for permuted samples
    p_sim        : array
                   (if permutations>0)
                   p-value based on permutations (one-tailed)
                   null: spatial randomness
                   alternative: the observed I is extreme if
                   it is either extremely greater or extremely lower
                   than the values obtained based on permutations
    EI_sim       : float
                   (if permutations>0)
                   average value of I from permutations
    VI_sim       : float
                   (if permutations>0)
                   variance of I from permutations
    seI_sim      : float
                   (if permutations>0)
                   standard deviation of I under permutations.
    z_sim        : float
                   (if permutations>0)
                   standardized I based on permutations
    p_z_sim      : float
                   (if permutations>0)
                   p-value based on standard normal approximation from
                   permutations

    Notes
    -----
    Technical details and derivations can be found in :cite:`cliff81`.

    Examples
    --------
    >>> import libpysal
    >>> w = libpysal.io.open(libpysal.examples.get_path("stl.gal")).read()
    >>> f = libpysal.io.open(libpysal.examples.get_path("stl_hom.txt"))
    >>> y = np.array(f.by_col['HR8893'])
    >>> from esda.moran import Moran
    >>> mi = Moran(y, w)
    >>> round(mi.I, 3)
    0.244
    >>> mi.EI
    -0.012987012987012988
    >>> mi.p_norm
    0.00027147862770937614

    SIDS example replicating OpenGeoda

    >>> w = libpysal.io.open(libpysal.examples.get_path("sids2.gal")).read()
    >>> f = libpysal.io.open(libpysal.examples.get_path("sids2.dbf"))
    >>> SIDR = np.array(f.by_col("SIDR74"))
    >>> mi = Moran(SIDR, w)
    >>> round(mi.I, 3)
    0.248
    >>> mi.p_norm
    0.0001158330781489969

    One-tailed

    >>> mi_1 = Moran(SIDR, w, two_tailed=False)
    >>> round(mi_1.I, 3)
    0.248
    >>> round(mi_1.p_norm, 4)
    0.0001
    """

    def __init__(self, y, w, transformation="r", permutations=PERMUTATIONS,
                 two_tailed=True):
        y = np.asarray(y).flatten()
        self.y = y
        w.transform = transformation
        self.w = w
        self.permutations = permutations
        # computes self.z, self.z2ss and the analytical moments
        self.__moments()
        self.I = self.__calc(self.z)
        self.z_norm = (self.I - self.EI) / self.seI_norm
        self.z_rand = (self.I - self.EI) / self.seI_rand
        # one-tailed p-values from the appropriate tail
        if self.z_norm > 0:
            self.p_norm = 1 - stats.norm.cdf(self.z_norm)
            self.p_rand = 1 - stats.norm.cdf(self.z_rand)
        else:
            self.p_norm = stats.norm.cdf(self.z_norm)
            self.p_rand = stats.norm.cdf(self.z_rand)
        if two_tailed:
            self.p_norm *= 2.
            self.p_rand *= 2.
        if permutations:
            # permutation inference: recompute I on shuffled z values
            sim = [self.__calc(np.random.permutation(self.z))
                   for i in range(permutations)]
            self.sim = sim = np.array(sim)
            above = sim >= self.I
            larger = above.sum()
            # fold to the smaller tail for a one-tailed pseudo p-value
            if (self.permutations - larger) < larger:
                larger = self.permutations - larger
            self.p_sim = (larger + 1.) / (permutations + 1.)
            self.EI_sim = sim.sum() / permutations
            self.seI_sim = np.array(sim).std()
            self.VI_sim = self.seI_sim ** 2
            self.z_sim = (self.I - self.EI_sim) / self.seI_sim
            if self.z_sim > 0:
                self.p_z_sim = 1 - stats.norm.cdf(self.z_sim)
            else:
                self.p_z_sim = stats.norm.cdf(self.z_sim)
        # provide .z attribute that is znormalized
        sy = y.std()
        self.z /= sy

    def __moments(self):
        # analytical moments of I under normality and randomization
        self.n = len(self.y)
        y = self.y
        z = y - y.mean()
        self.z = z
        self.z2ss = (z * z).sum()
        self.EI = -1. / (self.n - 1)
        n = self.n
        n2 = n * n
        s1 = self.w.s1
        s0 = self.w.s0
        s2 = self.w.s2
        s02 = s0 * s0
        v_num = n2 * s1 - n * s2 + 3 * s02
        v_den = (n - 1) * (n + 1) * s02
        self.VI_norm = v_num / v_den - (1.0 / (n - 1)) ** 2
        self.seI_norm = self.VI_norm ** (1 / 2.)
        # variance under randomization
        xd4 = z**4
        xd2 = z**2
        k_num = xd4.sum() / n
        k_den = (xd2.sum() / n)**2
        k = k_num / k_den  # sample kurtosis
        EI = self.EI
        A = n * ((n2 - 3 * n + 3) * s1 - n * s2 + 3 * s02)
        B = k * ((n2 - n) * s1 - 2 * n * s2 + 6 * s02 )
        VIR = (A - B) / ((n - 1) * (n - 2) * (n - 3 ) * s02) - EI*EI
        self.VI_rand = VIR
        self.seI_rand = VIR ** (1 / 2.)

    def __calc(self, z):
        # I = (n / S0) * (z' W z) / (z' z)
        zl = slag(self.w, z)
        inum = (z * zl).sum()
        return self.n / self.w.s0 * inum / self.z2ss

    @property
    def _statistic(self):
        """More consistent hidden attribute to access ESDA statistics"""
        return self.I

    @classmethod
    def by_col(cls, df, cols, w=None, inplace=False, pvalue='sim', outvals=None, **stat_kws):
        """
        Function to compute a Moran statistic on a dataframe

        Arguments
        ---------
        df          :   pandas.DataFrame
                        a pandas dataframe with a geometry column
        cols        :   string or list of string
                        name or list of names of columns to use to compute
                        the statistic
        w           :   pysal weights object
                        a weights object aligned with the dataframe. If not
                        provided, this is searched for in the dataframe's
                        metadata
        inplace     :   bool
                        a boolean denoting whether to operate on the
                        dataframe inplace or to return a series containing
                        the results of the computation. If operating inplace,
                        the derived columns will be named 'column_moran'
        pvalue      :   string
                        a string denoting which pvalue should be returned.
                        Refer to the Moran statistic's documentation for
                        available p-values
        outvals     :   list of strings
                        list of arbitrary attributes to return as columns
                        from the Moran statistic
        **stat_kws  :   keyword arguments
                        options to pass to the underlying statistic. For
                        this, see the documentation for the Moran statistic.

        Returns
        --------
        If inplace, None, and operation is conducted on dataframe in memory.
        Otherwise, returns a copy of the dataframe with the relevant columns
        attached.

        See Also
        ---------
        For further documentation, refer to the Moran class in pysal.esda
        """
        return _univariate_handler(df, cols, w=w, inplace=inplace, pvalue=pvalue,
                                   outvals=outvals, stat=cls,
                                   swapname=cls.__name__.lower(), **stat_kws)
class Moran_BV(object):
    """
    Bivariate Moran's I

    Parameters
    ----------
    x : array
        x-axis variable
    y : array
        wy will be on y axis
    w : W
        weight instance assumed to be aligned with y
    transformation  : {'R', 'B', 'D', 'U', 'V'}
                      weights transformation, default is row-standardized "r".
                      Other options include
                      "B": binary,
                      "D": doubly-standardized,
                      "U": untransformed (general weights),
                      "V": variance-stabilizing.
    permutations    : int
                      number of random permutations for calculation of pseudo
                      p_values

    Attributes
    ----------
    zx            : array
                    original x variable standardized by mean and std
    zy            : array
                    original y variable standardized by mean and std
    w             : W
                    original w object
    permutation   : int
                    number of permutations
    I             : float
                    value of bivariate Moran's I
    sim           : array
                    (if permutations>0)
                    vector of I values for permuted samples
    p_sim         : float
                    (if permutations>0)
                    p-value based on permutations (one-sided)
                    null: spatial randomness
                    alternative: the observed I is extreme
                    it is either extremely high or extremely low
    EI_sim        : array
                    (if permutations>0)
                    average value of I from permutations
    VI_sim        : array
                    (if permutations>0)
                    variance of I from permutations
    seI_sim       : array
                    (if permutations>0)
                    standard deviation of I under permutations.
    z_sim         : array
                    (if permutations>0)
                    standardized I based on permutations
    p_z_sim       : float
                    (if permutations>0)
                    p-value based on standard normal approximation from
                    permutations

    Notes
    -----
    Inference is only based on permutations as analytical results are not too
    reliable.

    Examples
    --------
    >>> import libpysal
    >>> import numpy as np

    Set random number generator seed so we can replicate the example

    >>> np.random.seed(10)

    Open the sudden infant death dbf file and read in rates for 74 and 79
    converting each to a numpy array

    >>> f = libpysal.io.open(libpysal.examples.get_path("sids2.dbf"))
    >>> SIDR74 = np.array(f.by_col['SIDR74'])
    >>> SIDR79 = np.array(f.by_col['SIDR79'])

    Read a GAL file and construct our spatial weights object

    >>> w = libpysal.io.open(libpysal.examples.get_path("sids2.gal")).read()

    Create an instance of Moran_BV

    >>> from esda.moran import Moran_BV
    >>> mbi = Moran_BV(SIDR79, SIDR74, w)

    What is the bivariate Moran's I value

    >>> round(mbi.I, 3)
    0.156

    Based on 999 permutations, what is the p-value of our statistic

    >>> round(mbi.p_z_sim, 3)
    0.001
    """

    def __init__(self, x, y, w, transformation="r", permutations=PERMUTATIONS):
        x = np.asarray(x).flatten()
        y = np.asarray(y).flatten()
        # standardize with the sample standard deviation (ddof=1)
        zy = (y - y.mean()) / y.std(ddof=1)
        zx = (x - x.mean()) / x.std(ddof=1)
        self.y = y
        self.x = x
        self.zx = zx
        self.zy = zy
        n = x.shape[0]
        self.den = n - 1.  # zx'zx = zy'zy = n-1
        w.transform = transformation
        self.w = w
        self.I = self.__calc(zy)
        if permutations:
            # permutation inference: shuffle zy and recompute
            nrp = np.random.permutation
            sim = [self.__calc(nrp(zy)) for i in range(permutations)]
            self.sim = sim = np.array(sim)
            above = sim >= self.I
            larger = above.sum()
            # fold to the smaller tail for a one-sided pseudo p-value
            if (permutations - larger) < larger:
                larger = permutations - larger
            self.p_sim = (larger + 1.) / (permutations + 1.)
            self.EI_sim = sim.sum() / permutations
            self.seI_sim = np.array(sim).std()
            self.VI_sim = self.seI_sim ** 2
            self.z_sim = (self.I - self.EI_sim) / self.seI_sim
            if self.z_sim > 0:
                self.p_z_sim = 1 - stats.norm.cdf(self.z_sim)
            else:
                self.p_z_sim = stats.norm.cdf(self.z_sim)

    def __calc(self, zy):
        # I_BV = zx' W zy / (n - 1)
        wzy = slag(self.w, zy)
        self.num = (self.zx * wzy).sum()
        return self.num / self.den

    @property
    def _statistic(self):
        """More consistent hidden attribute to access ESDA statistics"""
        return self.I

    @classmethod
    def by_col(cls, df, x, y=None, w=None, inplace=False, pvalue='sim', outvals=None, **stat_kws):
        """
        Function to compute a Moran_BV statistic on a dataframe

        Arguments
        ---------
        df          :   pandas.DataFrame
                        a pandas dataframe with a geometry column
        X           :   list of strings
                        column name or list of column names to use as X
                        values to compute the bivariate statistic. If no Y is
                        provided, pairwise comparisons among these variates
                        are used instead.
        Y           :   list of strings
                        column name or list of column names to use as Y
                        values to compute the bivariate statistic. If no Y is
                        provided, pairwise comparisons among the X variates
                        are used instead.
        w           :   pysal weights object
                        a weights object aligned with the dataframe. If not
                        provided, this is searched for in the dataframe's
                        metadata
        inplace     :   bool
                        a boolean denoting whether to operate on the
                        dataframe inplace or to return a series containing
                        the results of the computation. If operating inplace,
                        the derived columns will be named 'column_moran_local'
        pvalue      :   string
                        a string denoting which pvalue should be returned.
                        Refer to the Moran_BV statistic's documentation for
                        available p-values
        outvals     :   list of strings
                        list of arbitrary attributes to return as columns
                        from the Moran_BV statistic
        **stat_kws  :   keyword arguments
                        options to pass to the underlying statistic. For
                        this, see the documentation for the Moran_BV
                        statistic.

        Returns
        --------
        If inplace, None, and operation is conducted on dataframe in memory.
        Otherwise, returns a copy of the dataframe with the relevant columns
        attached.

        See Also
        ---------
        For further documentation, refer to the Moran_BV class in pysal.esda
        """
        return _bivariate_handler(df, x, y=y, w=w, inplace=inplace,
                                  pvalue = pvalue, outvals = outvals,
                                  swapname=cls.__name__.lower(), stat=cls,**stat_kws)
def Moran_BV_matrix(variables, w, permutations=0, varnames=None):
    """
    Bivariate Moran Matrix

    Calculates bivariate Moran between all pairs of a set of variables.

    Parameters
    ----------
    variables    : array or pandas.DataFrame
                   sequence of variables to be assessed
    w            : W
                   a spatial weights object
    permutations : int
                   number of permutations
    varnames     : list, optional if variables is an array
                   Strings for variable names. Will add an
                   attribute to `Moran_BV` objects in results needed for
                   plotting in `splot` or `.plot()`. Default =None.
                   Note: If variables is a `pandas.DataFrame` varnames
                   will automatically be generated

    Returns
    -------
    results      : dictionary
                   (i, j) is the key for the pair of variables, values are
                   the Moran_BV objects.

    Examples
    --------

    open dbf

    >>> import libpysal
    >>> f = libpysal.io.open(libpysal.examples.get_path("sids2.dbf"))

    pull of selected variables from dbf and create numpy arrays for each

    >>> varnames = ['SIDR74', 'SIDR79', 'NWR74', 'NWR79']
    >>> vars = [np.array(f.by_col[var]) for var in varnames]

    create a contiguity matrix from an external gal file

    >>> w = libpysal.io.open(libpysal.examples.get_path("sids2.gal")).read()

    create an instance of Moran_BV_matrix

    >>> from esda.moran import Moran_BV_matrix
    >>> res = Moran_BV_matrix(vars, w, varnames = varnames)

    check values

    >>> round(res[(0, 1)].I,7)
    0.1936261
    >>> round(res[(3, 0)].I,7)
    0.3770138
    """
    try:
        # check if pandas is installed
        import pandas
        if isinstance(variables, pandas.DataFrame):
            # if yes use variables as df and convert to numpy_array
            varnames = pandas.Index.tolist(variables.columns)
            variables_n = []
            for var in varnames:
                variables_n.append(variables[str(var)].values)
        else:
            variables_n = variables
    except ImportError:
        # pandas not available: assume a plain sequence of arrays
        variables_n = variables

    results = _Moran_BV_Matrix_array(variables=variables_n, w=w,
                                     permutations=permutations,
                                     varnames=varnames)
    return results
class Moran_Rate(Moran):
    """
    Adjusted Moran's I Global Autocorrelation Statistic for Rate
    Variables :cite:`Assun_o_1999`

    Parameters
    ----------
    e               : array
                      an event variable measured across n spatial units
    b               : array
                      a population-at-risk variable measured across n spatial
                      units
    w               : W
                      spatial weights instance
    adjusted        : boolean
                      whether or not Moran's I needs to be adjusted for rate
                      variable
    transformation  : {'R', 'B', 'D', 'U', 'V'}
                      weights transformation, default is row-standardized "r".
                      Other options include
                      "B": binary,
                      "D": doubly-standardized,
                      "U": untransformed (general weights),
                      "V": variance-stabilizing.
    two_tailed      : boolean
                      If True (default), analytical p-values for Moran's I are
                      two-tailed, otherwise they are one tailed.
    permutations    : int
                      number of random permutations for calculation of pseudo
                      p_values

    Attributes
    ----------
    y            : array
                   rate variable computed from parameters e and b
                   if adjusted is True, y is standardized rates
                   otherwise, y is raw rates
    w            : W
                   original w object
    permutations : int
                   number of permutations
    I            : float
                   value of Moran's I
    EI           : float
                   expected value under normality assumption
    VI_norm      : float
                   variance of I under normality assumption
    seI_norm     : float
                   standard deviation of I under normality assumption
    z_norm       : float
                   z-value of I under normality assumption
    p_norm       : float
                   p-value of I under normality assumption
    VI_rand      : float
                   variance of I under randomization assumption
    seI_rand     : float
                   standard deviation of I under randomization assumption
    z_rand       : float
                   z-value of I under randomization assumption
    p_rand       : float
                   p-value of I under randomization assumption
    two_tailed   : boolean
                   If True, p_norm and p_rand are two-tailed p-values,
                   otherwise they are one-tailed.
    sim          : array
                   (if permutations>0)
                   vector of I values for permuted samples
    p_sim        : array
                   (if permutations>0)
                   p-value based on permutations (one-sided)
                   null: spatial randomness
                   alternative: the observed I is extreme if it is
                   either extremely greater or extremely lower than the values
                   obtained from permutations
    EI_sim       : float
                   (if permutations>0)
                   average value of I from permutations
    VI_sim       : float
                   (if permutations>0)
                   variance of I from permutations
    seI_sim      : float
                   (if permutations>0)
                   standard deviation of I under permutations.
    z_sim        : float
                   (if permutations>0)
                   standardized I based on permutations
    p_z_sim      : float
                   (if permutations>0)
                   p-value based on standard normal approximation from

    Examples
    --------
    >>> import libpysal
    >>> w = libpysal.io.open(libpysal.examples.get_path("sids2.gal")).read()
    >>> f = libpysal.io.open(libpysal.examples.get_path("sids2.dbf"))
    >>> e = np.array(f.by_col('SID79'))
    >>> b = np.array(f.by_col('BIR79'))
    >>> from esda.moran import Moran_Rate
    >>> mi = Moran_Rate(e, b, w, two_tailed=False)
    >>> "%6.4f" % mi.I
    '0.1662'
    >>> "%6.4f" % mi.p_norm
    '0.0042'
    """

    def __init__(self, e, b, w, adjusted=True, transformation="r",
                 permutations=PERMUTATIONS, two_tailed=True):
        e = np.asarray(e).flatten()
        b = np.asarray(b).flatten()
        # either Assuncao-adjusted (empirical Bayes standardized) rates
        # or raw rates e/b
        if adjusted:
            y = assuncao_rate(e, b)
        else:
            y = e * 1.0 / b
        Moran.__init__(self, y, w, transformation=transformation,
                       permutations=permutations, two_tailed=two_tailed)

    @classmethod
    def by_col(cls, df, events, populations, w=None, inplace=False,
               pvalue='sim', outvals=None, swapname='', **stat_kws):
        """
        Function to compute a Moran_Rate statistic on a dataframe

        Arguments
        ---------
        df          :   pandas.DataFrame
                        a pandas dataframe with a geometry column
        events      :   string or list of strings
                        one or more names where events are stored
        populations :   string or list of strings
                        one or more names where the populations corresponding
                        to the events are stored. If one population column is
                        provided, it is used for all event columns. If more
                        than one population column is provided but there is
                        not a population for every event column, an exception
                        will be raised.
        w           :   pysal weights object
                        a weights object aligned with the dataframe. If not
                        provided, this is searched for in the dataframe's
                        metadata
        inplace     :   bool
                        a boolean denoting whether to operate on the dataframe
                        inplace or to return a series containing the results
                        of the computation. If operating inplace, the derived
                        columns will be named 'column_moran_rate'
        pvalue      :   string
                        a string denoting which pvalue should be returned.
                        Refer to the Moran_Rate statistic's documentation for
                        available p-values
        outvals     :   list of strings
                        list of arbitrary attributes to return as columns
                        from the Moran_Rate statistic
        **stat_kws  :   keyword arguments
                        options to pass to the underlying statistic. For this,
                        see the documentation for the Moran_Rate statistic.

        Returns
        --------
        If inplace, None, and operation is conducted on dataframe in memory.
        Otherwise, returns a copy of the dataframe with the relevant columns
        attached.

        See Also
        ---------
        For further documentation, refer to the Moran_Rate class in pysal.esda
        """
        if not inplace:
            # work on a copy, mutate it in place, and hand it back
            new = df.copy()
            cls.by_col(new, events, populations, w=w, inplace=True,
                       pvalue=pvalue, outvals=outvals, swapname=swapname,
                       **stat_kws)
            return new
        if isinstance(events, str):
            events = [events]
        if isinstance(populations, str):
            populations = [populations]
        # broadcast a single population column over all event columns
        if len(populations) < len(events):
            populations = populations * len(events)
        if len(events) != len(populations):
            raise ValueError('There is not a one-to-one matching between events and '
                             'populations!\nEvents: {}\n\nPopulations:'
                             ' {}'.format(events, populations))
        adjusted = stat_kws.pop('adjusted', True)
        if isinstance(adjusted, bool):
            adjusted = [adjusted] * len(events)
        # FIX: was `if swapname is '':` — identity comparison against a
        # string literal is interning-dependent and raises SyntaxWarning
        # on CPython >= 3.8; use equality instead.
        if swapname == '':
            swapname = cls.__name__.lower()
        rates = [assuncao_rate(df[e], df[pop]) if adj
                 else df[e].astype(float) / df[pop]
                 for e, pop, adj in zip(events, populations, adjusted)]
        names = ['-'.join((e, p)) for e, p in zip(events, populations)]
        out_df = df.copy()
        # trick to avoid importing pandas
        # NOTE(review): DataFrame.from_items was removed in pandas 1.0 —
        # confirm the supported pandas versions for this code path.
        rate_df = out_df.from_items(list(zip(names, rates)))
        stat_df = _univariate_handler(rate_df, names, w=w, inplace=False,
                                      pvalue=pvalue, outvals=outvals,
                                      swapname=swapname,
                                      stat=Moran,  # how would this get done w/super?
                                      **stat_kws)
        for col in stat_df.columns:
            df[col] = stat_df[col]
class Moran_Local(object):
"""Local Moran Statistics
Parameters
----------
y : array
(n,1), attribute array
w : W
weight instance assumed to be aligned with y
transformation : {'R', 'B', 'D', 'U', 'V'}
weights transformation, default is row-standardized "r".
Other options include
"B": binary,
"D": doubly-standardized,
"U": untransformed (general weights),
"V": variance-stabilizing.
permutations : int
number of random permutations for calculation of pseudo
p_values
geoda_quads : boolean
(default=False)
If True use GeoDa scheme: HH=1, LL=2, LH=3, HL=4
If False use PySAL Scheme: HH=1, LH=2, LL=3, HL=4
Attributes
----------
y : array
original variable
w : W
original w object
permutations : int
number of random permutations for calculation of pseudo
p_values
Is : array
local Moran's I values
q : array
(if permutations>0)
values indicate quandrant location 1 HH, 2 LH, 3 LL, 4 HL
sim : array (permutations by n)
(if permutations>0)
I values for permuted samples
p_sim : array
(if permutations>0)
p-values based on permutations (one-sided)
null: spatial randomness
alternative: the observed Ii is further away or extreme
from the median of simulated values. It is either extremelyi
high or extremely low in the distribution of simulated Is.
EI_sim : array
(if permutations>0)
average values of local Is from permutations
VI_sim : array
(if permutations>0)
variance of Is from permutations
seI_sim : array
(if permutations>0)
standard deviations of Is under permutations.
z_sim : arrray
(if permutations>0)
standardized Is based on permutations
p_z_sim : array
(if permutations>0)
p-values based on standard normal approximation from
permutations (one-sided)
for two-sided tests, these values should be multiplied by 2
Notes
-----
For technical details see :cite:`Anselin95`.
Examples
--------
>>> import libpysal
>>> import numpy as np
>>> np.random.seed(10)
>>> w = libpysal.io.open(libpysal.examples.get_path("desmith.gal")).read()
>>> f = libpysal.io.open(libpysal.examples.get_path("desmith.txt"))
>>> y = np.array(f.by_col['z'])
>>> from esda.moran import Moran_Local
>>> lm = Moran_Local(y, w, transformation = "r", permutations = 99)
>>> lm.q
array([4, 4, 4, 2, 3, 3, 1, 4, 3, 3])
>>> lm.p_z_sim[0]
0.24669152541631179
>>> lm = Moran_Local(y, w, transformation = "r", permutations = 99, \
geoda_quads=True)
>>> lm.q
array([4, 4, 4, 3, 2, 2, 1, 4, 2, 2])
Note random components result is slightly different values across
architectures so the results have been removed from doctests and will be
moved into unittests that are conditional on architectures
"""
def __init__(self, y, w, transformation="r", permutations=PERMUTATIONS,
geoda_quads=False):
y = np.asarray(y).flatten()
self.y = y
n = len(y)
self.n = n
self.n_1 = n - 1
z = y - y.mean()
# setting for floating point noise
orig_settings = np.seterr()
np.seterr(all="ignore")
sy = y.std()
z /= sy
np.seterr(**orig_settings)
self.z = z
w.transform = transformation
self.w = w
self.permutations = permutations
self.den = (z * z).sum()
self.Is = self.calc(self.w, self.z)
self.geoda_quads = geoda_quads
quads = [1, 2, 3, 4]
if geoda_quads:
quads = [1, 3, 2, 4]
self.quads = quads
self.__quads()
if permutations:
self.__crand()
sim = np.transpose(self.rlisas)
above = sim >= self.Is
larger = above.sum(0)
low_extreme = (self.permutations - larger) < larger
larger[low_extreme] = self.permutations - larger[low_extreme]
self.p_sim = (larger + 1.0) / (permutations + 1.0)
self.sim = sim
self.EI_sim = sim.mean(axis=0)
self.seI_sim = sim.std(axis=0)
self.VI_sim = self.seI_sim * self.seI_sim
self.z_sim = (self.Is - self.EI_sim) / self.seI_sim
self.p_z_sim = 1 - stats.norm.cdf(np.abs(self.z_sim))
def calc(self, w, z):
zl = slag(w, z)
return self.n_1 * self.z * zl / self.den
def __crand(self):
"""
conditional randomization
for observation i with ni neighbors, the candidate set cannot include
i (we don't want i being a neighbor of i). we have to sample without
replacement from a set of ids that doesn't include i. numpy doesn't
directly support sampling wo replacement and it is expensive to
implement this. instead we omit i from the original ids, permute the
ids and take the first ni elements of the permuted ids as the
neighbors to i in each randomization.
"""
z = self.z
lisas = np.zeros((self.n, self.permutations))
n_1 = self.n - 1
prange = list(range(self.permutations))
k = self.w.max_neighbors + 1
nn = self.n - 1
rids = np.array([np.random.permutation(nn)[0:k] for i in prange])
ids = np.arange(self.w.n)
ido = self.w.id_order
w = [self.w.weights[ido[i]] for i in ids]
wc = [self.w.cardinalities[ido[i]] for i in ids]
for i in range(self.w.n):
idsi = ids[ids != i]
np.random.shuffle(idsi)
tmp = z[idsi[rids[:, 0:wc[i]]]]
lisas[i] = z[i] * (w[i] * tmp).sum(1)
self.rlisas = (n_1 / self.den) * lisas
def __quads(self):
zl = slag(self.w, self.z)
zp = self.z > 0
lp = zl > 0
pp = zp * lp
np = (1 - zp) * lp
nn = (1 - zp) * (1 - lp)
pn = zp * (1 - lp)
self.q = self.quads[0] * pp + self.quads[1] * np + self.quads[2] * nn \
+ self.quads[3] * pn
@property
def _statistic(self):
"""More consistent hidden attribute to access ESDA statistics"""
return self.Is
@classmethod
def by_col(cls, df, cols, w=None, inplace=False, pvalue='sim', outvals=None, **stat_kws):
"""
Function to compute a Moran_Local statistic on a dataframe
Arguments
---------
df : pandas.DataFrame
a pandas dataframe with a geometry column
cols : string or list of string
name or list of names of columns to use to compute the statistic
w : pysal weights object
a weights object aligned with the dataframe. If not provided, this
is searched for in the dataframe's metadata
inplace : bool
a boolean denoting whether to operate on the dataframe inplace or to
return a series contaning the results of the computation. If
operating inplace, the derived columns will be named
'column_moran_local'
pvalue : string
a string denoting which pvalue should be returned. Refer to the
the Moran_Local statistic's documentation for available p-values
outvals : list of strings
list of arbitrary attributes to return as columns from the
Moran_Local statistic
**stat_kws : keyword arguments
options to pass to the underlying statistic. For this, see the
documentation for the Moran_Local statistic.
Returns
--------
If inplace, None, and operation is conducted on dataframe in memory. Otherwise,
returns a copy of the dataframe with the relevant columns attached.
See Also
---------
For further documentation, refer to the Moran_Local class in pysal.esda
"""
return _univariate_handler(df, cols, w=w, inplace=inplace, pvalue=pvalue,
outvals=outvals, stat=cls,
swapname=cls.__name__.lower(), **stat_kws)
class Moran_Local_BV(object):
"""Bivariate Local Moran Statistics
Parameters
----------
x : array
x-axis variable
y : array
(n,1), wy will be on y axis
w : W
weight instance assumed to be aligned with y
transformation : {'R', 'B', 'D', 'U', 'V'}
weights transformation, default is row-standardized "r".
Other options include
"B": binary,
"D": doubly-standardized,
"U": untransformed (general weights),
"V": variance-stabilizing.
permutations : int
number of random permutations for calculation of pseudo
p_values
geoda_quads : boolean
(default=False)
If True use GeoDa scheme: HH=1, LL=2, LH=3, HL=4
If False use PySAL Scheme: HH=1, LH=2, LL=3, HL=4
Attributes
----------
zx : array
original x variable standardized by mean and std
zy : array
original y variable standardized by mean and std
w : W
original w object
permutations : int
number of random permutations for calculation of pseudo
p_values
Is : float
value of Moran's I
q : array
(if permutations>0)
values indicate quandrant location 1 HH, 2 LH, 3 LL, 4 HL
sim : array
(if permutations>0)
vector of I values for permuted samples
p_sim : array
(if permutations>0)
p-value based on permutations (one-sided)
null: spatial randomness
alternative: the observed Ii is further away or extreme
from the median of simulated values. It is either extremelyi
high or extremely low in the distribution of simulated Is.
EI_sim : array
(if permutations>0)
average values of local Is from permutations
VI_sim : array
(if permutations>0)
variance of Is from permutations
seI_sim : array
(if permutations>0)
standard deviations of Is under permutations.
z_sim : arrray
(if permutations>0)
standardized Is based on permutations
p_z_sim : array
(if permutations>0)
p-values based on standard normal approximation from
permutations (one-sided)
for two-sided tests, these values should be multiplied by 2
Examples
--------
>>> import libpysal
>>> import numpy as np
>>> np.random.seed(10)
>>> w = libpysal.io.open(libpysal.examples.get_path("sids2.gal")).read()
>>> f = libpysal.io.open(libpysal.examples.get_path("sids2.dbf"))
>>> x = np.array(f.by_col['SIDR79'])
>>> y = np.array(f.by_col['SIDR74'])
>>> from esda.moran import Moran_Local_BV
>>> lm =Moran_Local_BV(x, y, w, transformation = "r", \
permutations = 99)
>>> lm.q[:10]
array([3, 4, 3, 4, 2, 1, 4, 4, 2, 4])
>>> lm = Moran_Local_BV(x, y, w, transformation = "r", \
permutations = 99, geoda_quads=True)
>>> lm.q[:10]
array([2, 4, 2, 4, 3, 1, 4, 4, 3, 4])
Note random components result is slightly different values across
architectures so the results have been removed from doctests and will be
moved into unittests that are conditional on architectures
"""
def __init__(self, x, y, w, transformation="r", permutations=PERMUTATIONS,
geoda_quads=False):
x = np.asarray(x).flatten()
y = np.asarray(y).flatten()
self.y = y
self.x =x
n = len(y)
self.n = n
self.n_1 = n - 1
zx = x - x.mean()
zy = y - y.mean()
# setting for floating point noise
orig_settings = np.seterr()
np.seterr(all="ignore")
sx = x.std()
zx /= sx
sy = y.std()
zy /= sy
np.seterr(**orig_settings)
self.zx = zx
self.zy = zy
w.transform = transformation
self.w = w
self.permutations = permutations
self.den = (zx * zx).sum()
self.Is = self.calc(self.w, self.zx, self.zy)
self.geoda_quads = geoda_quads
quads = [1, 2, 3, 4]
if geoda_quads:
quads = [1, 3, 2, 4]
self.quads = quads
self.__quads()
if permutations:
self.__crand()
sim = np.transpose(self.rlisas)
above = sim >= self.Is
larger = above.sum(0)
low_extreme = (self.permutations - larger) < larger
larger[low_extreme] = self.permutations - larger[low_extreme]
self.p_sim = (larger + 1.0) / (permutations + 1.0)
self.sim = sim
self.EI_sim = sim.mean(axis=0)
self.seI_sim = sim.std(axis=0)
self.VI_sim = self.seI_sim * self.seI_sim
self.z_sim = (self.Is - self.EI_sim) / self.seI_sim
self.p_z_sim = 1 - stats.norm.cdf(np.abs(self.z_sim))
def calc(self, w, zx, zy):
zly = slag(w, zy)
return self.n_1 * self.zx * zly / self.den
def __crand(self):
"""
conditional randomization
for observation i with ni neighbors, the candidate set cannot include
i (we don't want i being a neighbor of i). we have to sample without
replacement from a set of ids that doesn't include i. numpy doesn't
directly support sampling wo replacement and it is expensive to
implement this. instead we omit i from the original ids, permute the
ids and take the first ni elements of the permuted ids as the
neighbors to i in each randomization.
"""
lisas = np.zeros((self.n, self.permutations))
n_1 = self.n - 1
prange = list(range(self.permutations))
k = self.w.max_neighbors + 1
nn = self.n - 1
rids = np.array([np.random.permutation(nn)[0:k] for i in prange])
ids = np.arange(self.w.n)
ido = self.w.id_order
w = [self.w.weights[ido[i]] for i in ids]
wc = [self.w.cardinalities[ido[i]] for i in ids]
zx = self.zx
zy = self.zy
for i in range(self.w.n):
idsi = ids[ids != i]
np.random.shuffle(idsi)
tmp = zy[idsi[rids[:, 0:wc[i]]]]
lisas[i] = zx[i] * (w[i] * tmp).sum(1)
self.rlisas = (n_1 / self.den) * lisas
def __quads(self):
zl = slag(self.w, self.zy)
zp = self.zx > 0
lp = zl > 0
pp = zp * lp
np = (1 - zp) * lp
nn = (1 - zp) * (1 - lp)
pn = zp * (1 - lp)
self.q = self.quads[0] * pp + self.quads[1] * np + self.quads[2] * nn \
+ self.quads[3] * pn
@property
def _statistic(self):
"""More consistent hidden attribute to access ESDA statistics"""
return self.Is
@classmethod
def by_col(cls, df, x, y=None, w=None, inplace=False, pvalue='sim', outvals=None, **stat_kws):
"""
Function to compute a Moran_Local_BV statistic on a dataframe
Arguments
---------
df : pandas.DataFrame
a pandas dataframe with a geometry column
X : list of strings
column name or list of column names to use as X values to compute
the bivariate statistic. If no Y is provided, pairwise comparisons
among these variates are used instead.
Y : list of strings
column name or list of column names to use as Y values to compute
the bivariate statistic. if no Y is provided, pariwise comparisons
among the X variates are used instead.
w : pysal weights object
a weights object aligned with the dataframe. If not provided, this
is searched for in the dataframe's metadata
inplace : bool
a boolean denoting whether to operate on the dataframe inplace or to
return a series contaning the results of the computation. If
operating inplace, the derived columns will be named
'column_moran_local_bv'
pvalue : string
a string denoting which pvalue should be returned. Refer to the
the Moran_Local_BV statistic's documentation for available p-values
outvals : list of strings
list of arbitrary attributes to return as columns from the
Moran_Local_BV statistic
**stat_kws : keyword arguments
options to pass to the underlying statistic. For this, see the
documentation for the Moran_Local_BV statistic.
Returns
--------
If inplace, None, and operation is conducted on dataframe in memory. Otherwise,
returns a copy of the dataframe with the relevant columns attached.
See Also
---------
For further documentation, refer to the Moran_Local_BV class in pysal.esda
"""
return _bivariate_handler(df, x, y=y, w=w, inplace=inplace,
pvalue = pvalue, outvals = outvals,
swapname=cls.__name__.lower(), stat=cls,**stat_kws)
class Moran_Local_Rate(Moran_Local):
"""
Adjusted Local Moran Statistics for Rate Variables [Assuncao1999]_
Parameters
----------
e : array
(n,1), an event variable across n spatial units
b : array
(n,1), a population-at-risk variable across n spatial units
w : W
weight instance assumed to be aligned with y
adjusted : boolean
whether or not local Moran statistics need to be adjusted for
rate variable
transformation : {'R', 'B', 'D', 'U', 'V'}
weights transformation, default is row-standardized "r".
Other options include
"B": binary,
"D": doubly-standardized,
"U": untransformed (general weights),
"V": variance-stabilizing.
permutations : int
number of random permutations for calculation of pseudo
p_values
geoda_quads : boolean
(default=False)
If True use GeoDa scheme: HH=1, LL=2, LH=3, HL=4
If False use PySAL Scheme: HH=1, LH=2, LL=3, HL=4
Attributes
----------
y : array
rate variables computed from parameters e and b
if adjusted is True, y is standardized rates
otherwise, y is raw rates
w : W
original w object
permutations : int
number of random permutations for calculation of pseudo
p_values
I : float
value of Moran's I
q : array
(if permutations>0)
values indicate quandrant location 1 HH, 2 LH, 3 LL, 4 HL
sim : array
(if permutations>0)
vector of I values for permuted samples
p_sim : array
(if permutations>0)
p-value based on permutations (one-sided)
null: spatial randomness
alternative: the observed Ii is further away or extreme
from the median of simulated Iis. It is either extremely
high or extremely low in the distribution of simulated Is
EI_sim : float
(if permutations>0)
average value of I from permutations
VI_sim : float
(if permutations>0)
variance of I from permutations
seI_sim : float
(if permutations>0)
standard deviation of I under permutations.
z_sim : float
(if permutations>0)
standardized I based on permutations
p_z_sim : float
(if permutations>0)
p-value based on standard normal approximation from
permutations (one-sided)
for two-sided tests, these values should be multiplied by 2
Examples
--------
>>> import libpysal
>>> import numpy as np
>>> np.random.seed(10)
>>> w = libpysal.io.open(libpysal.examples.get_path("sids2.gal")).read()
>>> f = libpysal.io.open(libpysal.examples.get_path("sids2.dbf"))
>>> e = np.array(f.by_col('SID79'))
>>> b = np.array(f.by_col('BIR79'))
>>> from esda.moran import Moran_Local_Rate
>>> lm = Moran_Local_Rate(e, b, w, transformation = "r", permutations = 99)
>>> lm.q[:10]
array([2, 4, 3, 1, 2, 1, 1, 4, 2, 4])
>>> lm = Moran_Local_Rate(e, b, w, transformation = "r", permutations = 99, geoda_quads=True)
>>> lm.q[:10]
array([3, 4, 2, 1, 3, 1, 1, 4, 3, 4])
Note random components result is slightly different values across
architectures so the results have been removed from doctests and will be
moved into unittests that are conditional on architectures
"""
def __init__(self, e, b, w, adjusted=True, transformation="r",
permutations=PERMUTATIONS, geoda_quads=False):
e = np.asarray(e).flatten()
b = np.asarray(b).flatten()
if adjusted:
y = assuncao_rate(e, b)
else:
y = e * 1.0 / b
Moran_Local.__init__(self, y, w,
transformation=transformation,
permutations=permutations,
geoda_quads=geoda_quads)
@classmethod
def by_col(cls, df, events, populations, w=None, inplace=False,
pvalue='sim', outvals=None, swapname='', **stat_kws):
"""
Function to compute a Moran_Local_Rate statistic on a dataframe
Arguments
---------
df : pandas.DataFrame
a pandas dataframe with a geometry column
events : string or list of strings
one or more names where events are stored
populations : string or list of strings
one or more names where the populations corresponding to the
events are stored. If one population column is provided, it is
used for all event columns. If more than one population column
is provided but there is not a population for every event
column, an exception will be raised.
w : pysal weights object
a weights object aligned with the dataframe. If not provided, this
is searched for in the dataframe's metadata
inplace : bool
a boolean denoting whether to operate on the dataframe inplace or to
return a series contaning the results of the computation. If
operating inplace, the derived columns will be named 'column_moran_local_rate'
pvalue : string
a string denoting which pvalue should be returned. Refer to the
the Moran_Local_Rate statistic's documentation for available p-values
outvals : list of strings
list of arbitrary attributes to return as columns from the
Moran_Local_Rate statistic
**stat_kws : keyword arguments
options to pass to the underlying statistic. For this, see the
documentation for the Moran_Local_Rate statistic.
Returns
--------
If inplace, None, and operation is conducted on dataframe in memory. Otherwise,
returns a copy of the dataframe with the relevant columns attached.
See Also
---------
For further documentation, refer to the Moran_Local_Rate class in pysal.esda
"""
if not inplace:
new = df.copy()
cls.by_col(new, events, populations, w=w, inplace=True,
pvalue=pvalue, outvals=outvals, swapname=swapname,
**stat_kws)
return new
if isinstance(events, str):
events = [events]
if isinstance(populations, str):
populations = [populations]
if len(populations) < len(events):
populations = populations * len(events)
if len(events) != len(populations):
raise ValueError('There is not a one-to-one matching between events and '
'populations!\nEvents: {}\n\nPopulations:'
' {}'.format(events, populations))
adjusted = stat_kws.pop('adjusted', True)
if isinstance(adjusted, bool):
adjusted = [adjusted] * len(events)
if swapname is '':
swapname = cls.__name__.lower()
rates = [assuncao_rate(df[e], df[pop]) if adj
else df[e].astype(float) / df[pop]
for e,pop,adj in zip(events, populations, adjusted)]
names = ['-'.join((e,p)) for e,p in zip(events, populations)]
out_df = df.copy()
rate_df = out_df.from_items(list(zip(names, rates))) #trick to avoid importing pandas
_univariate_handler(rate_df, names, w=w, inplace=True,
pvalue = pvalue, outvals = outvals,
swapname=swapname,
stat=Moran_Local, #how would this get done w/super?
**stat_kws)
for col in rate_df.columns:
df[col] = rate_df[col]
|
pysal/esda | esda/moran.py | Moran_BV.by_col | python | def by_col(cls, df, x, y=None, w=None, inplace=False, pvalue='sim', outvals=None, **stat_kws):
return _bivariate_handler(df, x, y=y, w=w, inplace=inplace,
pvalue = pvalue, outvals = outvals,
swapname=cls.__name__.lower(), stat=cls,**stat_kws) | Function to compute a Moran_BV statistic on a dataframe
Arguments
---------
df : pandas.DataFrame
a pandas dataframe with a geometry column
X : list of strings
column name or list of column names to use as X values to compute
the bivariate statistic. If no Y is provided, pairwise comparisons
among these variates are used instead.
Y : list of strings
column name or list of column names to use as Y values to compute
the bivariate statistic. if no Y is provided, pariwise comparisons
among the X variates are used instead.
w : pysal weights object
a weights object aligned with the dataframe. If not provided, this
is searched for in the dataframe's metadata
inplace : bool
a boolean denoting whether to operate on the dataframe inplace or to
return a series contaning the results of the computation. If
operating inplace, the derived columns will be named
'column_moran_local'
pvalue : string
a string denoting which pvalue should be returned. Refer to the
the Moran_BV statistic's documentation for available p-values
outvals : list of strings
list of arbitrary attributes to return as columns from the
Moran_BV statistic
**stat_kws : keyword arguments
options to pass to the underlying statistic. For this, see the
documentation for the Moran_BV statistic.
Returns
--------
If inplace, None, and operation is conducted on dataframe in memory. Otherwise,
returns a copy of the dataframe with the relevant columns attached.
See Also
---------
For further documentation, refer to the Moran_BV class in pysal.esda | train | https://github.com/pysal/esda/blob/2fafc6ec505e153152a86601d3e0fba080610c20/esda/moran.py#L415-L461 | [
"def _bivariate_handler(df, x, y=None, w=None, inplace=True, pvalue='sim',\n outvals=None, **kwargs):\n \"\"\"\n Compute a descriptive bivariate statistic over two sets of columns, `x` and\n `y`, contained in `df`.\n\n Parameters\n ----------\n df : pandas.DataFrame\... | class Moran_BV(object):
"""
Bivariate Moran's I
Parameters
----------
x : array
x-axis variable
y : array
wy will be on y axis
w : W
weight instance assumed to be aligned with y
transformation : {'R', 'B', 'D', 'U', 'V'}
weights transformation, default is row-standardized "r".
Other options include
"B": binary,
"D": doubly-standardized,
"U": untransformed (general weights),
"V": variance-stabilizing.
permutations : int
number of random permutations for calculation of pseudo
p_values
Attributes
----------
zx : array
original x variable standardized by mean and std
zy : array
original y variable standardized by mean and std
w : W
original w object
permutation : int
number of permutations
I : float
value of bivariate Moran's I
sim : array
(if permutations>0)
vector of I values for permuted samples
p_sim : float
(if permutations>0)
p-value based on permutations (one-sided)
null: spatial randomness
alternative: the observed I is extreme
it is either extremely high or extremely low
EI_sim : array
(if permutations>0)
average value of I from permutations
VI_sim : array
(if permutations>0)
variance of I from permutations
seI_sim : array
(if permutations>0)
standard deviation of I under permutations.
z_sim : array
(if permutations>0)
standardized I based on permutations
p_z_sim : float
(if permutations>0)
p-value based on standard normal approximation from
permutations
Notes
-----
Inference is only based on permutations as analytical results are not too
reliable.
Examples
--------
>>> import libpysal
>>> import numpy as np
Set random number generator seed so we can replicate the example
>>> np.random.seed(10)
Open the sudden infant death dbf file and read in rates for 74 and 79
converting each to a numpy array
>>> f = libpysal.io.open(libpysal.examples.get_path("sids2.dbf"))
>>> SIDR74 = np.array(f.by_col['SIDR74'])
>>> SIDR79 = np.array(f.by_col['SIDR79'])
Read a GAL file and construct our spatial weights object
>>> w = libpysal.io.open(libpysal.examples.get_path("sids2.gal")).read()
Create an instance of Moran_BV
>>> from esda.moran import Moran_BV
>>> mbi = Moran_BV(SIDR79, SIDR74, w)
What is the bivariate Moran's I value
>>> round(mbi.I, 3)
0.156
Based on 999 permutations, what is the p-value of our statistic
>>> round(mbi.p_z_sim, 3)
0.001
"""
def __init__(self, x, y, w, transformation="r", permutations=PERMUTATIONS):
x = np.asarray(x).flatten()
y = np.asarray(y).flatten()
zy = (y - y.mean()) / y.std(ddof=1)
zx = (x - x.mean()) / x.std(ddof=1)
self.y = y
self.x = x
self.zx = zx
self.zy = zy
n = x.shape[0]
self.den = n - 1. # zx'zx = zy'zy = n-1
w.transform = transformation
self.w = w
self.I = self.__calc(zy)
if permutations:
nrp = np.random.permutation
sim = [self.__calc(nrp(zy)) for i in range(permutations)]
self.sim = sim = np.array(sim)
above = sim >= self.I
larger = above.sum()
if (permutations - larger) < larger:
larger = permutations - larger
self.p_sim = (larger + 1.) / (permutations + 1.)
self.EI_sim = sim.sum() / permutations
self.seI_sim = np.array(sim).std()
self.VI_sim = self.seI_sim ** 2
self.z_sim = (self.I - self.EI_sim) / self.seI_sim
if self.z_sim > 0:
self.p_z_sim = 1 - stats.norm.cdf(self.z_sim)
else:
self.p_z_sim = stats.norm.cdf(self.z_sim)
def __calc(self, zy):
wzy = slag(self.w, zy)
self.num = (self.zx * wzy).sum()
return self.num / self.den
@property
def _statistic(self):
"""More consistent hidden attribute to access ESDA statistics"""
return self.I
@classmethod
|
pysal/esda | esda/moran.py | Moran_Rate.by_col | python | def by_col(cls, df, events, populations, w=None, inplace=False,
pvalue='sim', outvals=None, swapname='', **stat_kws):
if not inplace:
new = df.copy()
cls.by_col(new, events, populations, w=w, inplace=True,
pvalue=pvalue, outvals=outvals, swapname=swapname,
**stat_kws)
return new
if isinstance(events, str):
events = [events]
if isinstance(populations, str):
populations = [populations]
if len(populations) < len(events):
populations = populations * len(events)
if len(events) != len(populations):
raise ValueError('There is not a one-to-one matching between events and '
'populations!\nEvents: {}\n\nPopulations:'
' {}'.format(events, populations))
adjusted = stat_kws.pop('adjusted', True)
if isinstance(adjusted, bool):
adjusted = [adjusted] * len(events)
if swapname is '':
swapname = cls.__name__.lower()
rates = [assuncao_rate(df[e], df[pop]) if adj
else df[e].astype(float) / df[pop]
for e,pop,adj in zip(events, populations, adjusted)]
names = ['-'.join((e,p)) for e,p in zip(events, populations)]
out_df = df.copy()
rate_df = out_df.from_items(list(zip(names, rates))) #trick to avoid importing pandas
stat_df = _univariate_handler(rate_df, names, w=w, inplace=False,
pvalue = pvalue, outvals = outvals,
swapname=swapname,
stat=Moran, #how would this get done w/super?
**stat_kws)
for col in stat_df.columns:
df[col] = stat_df[col] | Function to compute a Moran_Rate statistic on a dataframe
Arguments
---------
df : pandas.DataFrame
a pandas dataframe with a geometry column
events : string or list of strings
one or more names where events are stored
populations : string or list of strings
one or more names where the populations corresponding to the
events are stored. If one population column is provided, it is
used for all event columns. If more than one population column
is provided but there is not a population for every event
column, an exception will be raised.
w : pysal weights object
a weights object aligned with the dataframe. If not provided, this
is searched for in the dataframe's metadata
inplace : bool
a boolean denoting whether to operate on the dataframe inplace or to
return a series contaning the results of the computation. If
operating inplace, the derived columns will be named
'column_moran_rate'
pvalue : string
a string denoting which pvalue should be returned. Refer to the
the Moran_Rate statistic's documentation for available p-values
outvals : list of strings
list of arbitrary attributes to return as columns from the
Moran_Rate statistic
**stat_kws : keyword arguments
options to pass to the underlying statistic. For this, see the
documentation for the Moran_Rate statistic.
Returns
--------
If inplace, None, and operation is conducted on dataframe in memory. Otherwise,
returns a copy of the dataframe with the relevant columns attached.
See Also
---------
For further documentation, refer to the Moran_Rate class in pysal.esda | train | https://github.com/pysal/esda/blob/2fafc6ec505e153152a86601d3e0fba080610c20/esda/moran.py#L679-L758 | [
"def _univariate_handler(df, cols, stat=None, w=None, inplace=True,\n pvalue = 'sim', outvals = None, swapname='', **kwargs):\n \"\"\"\n Compute a univariate descriptive statistic `stat` over columns `cols` in\n `df`.\n\n Parameters\n ----------\n df : pandas.DataFr... | class Moran_Rate(Moran):
"""
Adjusted Moran's I Global Autocorrelation Statistic for Rate
Variables :cite:`Assun_o_1999`
Parameters
----------
e : array
an event variable measured across n spatial units
b : array
a population-at-risk variable measured across n spatial
units
w : W
spatial weights instance
adjusted : boolean
whether or not Moran's I needs to be adjusted for rate
variable
transformation : {'R', 'B', 'D', 'U', 'V'}
weights transformation, default is row-standardized "r".
Other options include
"B": binary,
"D": doubly-standardized,
"U": untransformed (general weights),
"V": variance-stabilizing.
two_tailed : boolean
If True (default), analytical p-values for Moran's I are
two-tailed, otherwise they are one tailed.
permutations : int
number of random permutations for calculation of pseudo
p_values
Attributes
----------
y : array
rate variable computed from parameters e and b
if adjusted is True, y is standardized rates
otherwise, y is raw rates
w : W
original w object
permutations : int
number of permutations
I : float
value of Moran's I
EI : float
expected value under normality assumption
VI_norm : float
variance of I under normality assumption
seI_norm : float
standard deviation of I under normality assumption
z_norm : float
z-value of I under normality assumption
p_norm : float
p-value of I under normality assumption
VI_rand : float
variance of I under randomization assumption
seI_rand : float
standard deviation of I under randomization assumption
z_rand : float
z-value of I under randomization assumption
p_rand : float
p-value of I under randomization assumption
two_tailed : boolean
If True, p_norm and p_rand are two-tailed p-values,
otherwise they are one-tailed.
sim : array
(if permutations>0)
vector of I values for permuted samples
p_sim : array
(if permutations>0)
p-value based on permutations (one-sided)
null: spatial randomness
alternative: the observed I is extreme if it is
either extremely greater or extremely lower than the values
obtained from permutaitons
EI_sim : float
(if permutations>0)
average value of I from permutations
VI_sim : float
(if permutations>0)
variance of I from permutations
seI_sim : float
(if permutations>0)
standard deviation of I under permutations.
z_sim : float
(if permutations>0)
standardized I based on permutations
p_z_sim : float
(if permutations>0)
p-value based on standard normal approximation from
Examples
--------
>>> import libpysal
>>> w = libpysal.io.open(libpysal.examples.get_path("sids2.gal")).read()
>>> f = libpysal.io.open(libpysal.examples.get_path("sids2.dbf"))
>>> e = np.array(f.by_col('SID79'))
>>> b = np.array(f.by_col('BIR79'))
>>> from esda.moran import Moran_Rate
>>> mi = Moran_Rate(e, b, w, two_tailed=False)
>>> "%6.4f" % mi.I
'0.1662'
>>> "%6.4f" % mi.p_norm
'0.0042'
"""
def __init__(self, e, b, w, adjusted=True, transformation="r",
permutations=PERMUTATIONS, two_tailed=True):
e = np.asarray(e).flatten()
b = np.asarray(b).flatten()
if adjusted:
y = assuncao_rate(e, b)
else:
y = e * 1.0 / b
Moran.__init__(self, y, w, transformation=transformation,
permutations=permutations, two_tailed=two_tailed)
@classmethod
|
pysal/esda | esda/moran.py | Moran_Local_BV.__crand | python | def __crand(self):
lisas = np.zeros((self.n, self.permutations))
n_1 = self.n - 1
prange = list(range(self.permutations))
k = self.w.max_neighbors + 1
nn = self.n - 1
rids = np.array([np.random.permutation(nn)[0:k] for i in prange])
ids = np.arange(self.w.n)
ido = self.w.id_order
w = [self.w.weights[ido[i]] for i in ids]
wc = [self.w.cardinalities[ido[i]] for i in ids]
zx = self.zx
zy = self.zy
for i in range(self.w.n):
idsi = ids[ids != i]
np.random.shuffle(idsi)
tmp = zy[idsi[rids[:, 0:wc[i]]]]
lisas[i] = zx[i] * (w[i] * tmp).sum(1)
self.rlisas = (n_1 / self.den) * lisas | conditional randomization
for observation i with ni neighbors, the candidate set cannot include
i (we don't want i being a neighbor of i). we have to sample without
replacement from a set of ids that doesn't include i. numpy doesn't
directly support sampling wo replacement and it is expensive to
implement this. instead we omit i from the original ids, permute the
ids and take the first ni elements of the permuted ids as the
neighbors to i in each randomization. | train | https://github.com/pysal/esda/blob/2fafc6ec505e153152a86601d3e0fba080610c20/esda/moran.py#L1140-L1171 | null | class Moran_Local_BV(object):
"""Bivariate Local Moran Statistics
Parameters
----------
x : array
x-axis variable
y : array
(n,1), wy will be on y axis
w : W
weight instance assumed to be aligned with y
transformation : {'R', 'B', 'D', 'U', 'V'}
weights transformation, default is row-standardized "r".
Other options include
"B": binary,
"D": doubly-standardized,
"U": untransformed (general weights),
"V": variance-stabilizing.
permutations : int
number of random permutations for calculation of pseudo
p_values
geoda_quads : boolean
(default=False)
If True use GeoDa scheme: HH=1, LL=2, LH=3, HL=4
If False use PySAL Scheme: HH=1, LH=2, LL=3, HL=4
Attributes
----------
zx : array
original x variable standardized by mean and std
zy : array
original y variable standardized by mean and std
w : W
original w object
permutations : int
number of random permutations for calculation of pseudo
p_values
Is : float
value of Moran's I
q : array
(if permutations>0)
values indicate quandrant location 1 HH, 2 LH, 3 LL, 4 HL
sim : array
(if permutations>0)
vector of I values for permuted samples
p_sim : array
(if permutations>0)
p-value based on permutations (one-sided)
null: spatial randomness
alternative: the observed Ii is further away or extreme
from the median of simulated values. It is either extremelyi
high or extremely low in the distribution of simulated Is.
EI_sim : array
(if permutations>0)
average values of local Is from permutations
VI_sim : array
(if permutations>0)
variance of Is from permutations
seI_sim : array
(if permutations>0)
standard deviations of Is under permutations.
z_sim : arrray
(if permutations>0)
standardized Is based on permutations
p_z_sim : array
(if permutations>0)
p-values based on standard normal approximation from
permutations (one-sided)
for two-sided tests, these values should be multiplied by 2
Examples
--------
>>> import libpysal
>>> import numpy as np
>>> np.random.seed(10)
>>> w = libpysal.io.open(libpysal.examples.get_path("sids2.gal")).read()
>>> f = libpysal.io.open(libpysal.examples.get_path("sids2.dbf"))
>>> x = np.array(f.by_col['SIDR79'])
>>> y = np.array(f.by_col['SIDR74'])
>>> from esda.moran import Moran_Local_BV
>>> lm =Moran_Local_BV(x, y, w, transformation = "r", \
permutations = 99)
>>> lm.q[:10]
array([3, 4, 3, 4, 2, 1, 4, 4, 2, 4])
>>> lm = Moran_Local_BV(x, y, w, transformation = "r", \
permutations = 99, geoda_quads=True)
>>> lm.q[:10]
array([2, 4, 2, 4, 3, 1, 4, 4, 3, 4])
Note random components result is slightly different values across
architectures so the results have been removed from doctests and will be
moved into unittests that are conditional on architectures
"""
def __init__(self, x, y, w, transformation="r", permutations=PERMUTATIONS,
geoda_quads=False):
x = np.asarray(x).flatten()
y = np.asarray(y).flatten()
self.y = y
self.x =x
n = len(y)
self.n = n
self.n_1 = n - 1
zx = x - x.mean()
zy = y - y.mean()
# setting for floating point noise
orig_settings = np.seterr()
np.seterr(all="ignore")
sx = x.std()
zx /= sx
sy = y.std()
zy /= sy
np.seterr(**orig_settings)
self.zx = zx
self.zy = zy
w.transform = transformation
self.w = w
self.permutations = permutations
self.den = (zx * zx).sum()
self.Is = self.calc(self.w, self.zx, self.zy)
self.geoda_quads = geoda_quads
quads = [1, 2, 3, 4]
if geoda_quads:
quads = [1, 3, 2, 4]
self.quads = quads
self.__quads()
if permutations:
self.__crand()
sim = np.transpose(self.rlisas)
above = sim >= self.Is
larger = above.sum(0)
low_extreme = (self.permutations - larger) < larger
larger[low_extreme] = self.permutations - larger[low_extreme]
self.p_sim = (larger + 1.0) / (permutations + 1.0)
self.sim = sim
self.EI_sim = sim.mean(axis=0)
self.seI_sim = sim.std(axis=0)
self.VI_sim = self.seI_sim * self.seI_sim
self.z_sim = (self.Is - self.EI_sim) / self.seI_sim
self.p_z_sim = 1 - stats.norm.cdf(np.abs(self.z_sim))
def calc(self, w, zx, zy):
zly = slag(w, zy)
return self.n_1 * self.zx * zly / self.den
def __quads(self):
zl = slag(self.w, self.zy)
zp = self.zx > 0
lp = zl > 0
pp = zp * lp
np = (1 - zp) * lp
nn = (1 - zp) * (1 - lp)
pn = zp * (1 - lp)
self.q = self.quads[0] * pp + self.quads[1] * np + self.quads[2] * nn \
+ self.quads[3] * pn
@property
def _statistic(self):
"""More consistent hidden attribute to access ESDA statistics"""
return self.Is
@classmethod
def by_col(cls, df, x, y=None, w=None, inplace=False, pvalue='sim', outvals=None, **stat_kws):
"""
Function to compute a Moran_Local_BV statistic on a dataframe
Arguments
---------
df : pandas.DataFrame
a pandas dataframe with a geometry column
X : list of strings
column name or list of column names to use as X values to compute
the bivariate statistic. If no Y is provided, pairwise comparisons
among these variates are used instead.
Y : list of strings
column name or list of column names to use as Y values to compute
the bivariate statistic. if no Y is provided, pariwise comparisons
among the X variates are used instead.
w : pysal weights object
a weights object aligned with the dataframe. If not provided, this
is searched for in the dataframe's metadata
inplace : bool
a boolean denoting whether to operate on the dataframe inplace or to
return a series contaning the results of the computation. If
operating inplace, the derived columns will be named
'column_moran_local_bv'
pvalue : string
a string denoting which pvalue should be returned. Refer to the
the Moran_Local_BV statistic's documentation for available p-values
outvals : list of strings
list of arbitrary attributes to return as columns from the
Moran_Local_BV statistic
**stat_kws : keyword arguments
options to pass to the underlying statistic. For this, see the
documentation for the Moran_Local_BV statistic.
Returns
--------
If inplace, None, and operation is conducted on dataframe in memory. Otherwise,
returns a copy of the dataframe with the relevant columns attached.
See Also
---------
For further documentation, refer to the Moran_Local_BV class in pysal.esda
"""
return _bivariate_handler(df, x, y=y, w=w, inplace=inplace,
pvalue = pvalue, outvals = outvals,
swapname=cls.__name__.lower(), stat=cls,**stat_kws)
|
def flatten(l, unique=True):
    """flatten a list of lists

    Parameters
    ----------
    l : list
        of lists
    unique : boolean
        whether or not only unique items are wanted (default=True)

    Returns
    -------
    list
        of single items

    Examples
    --------
    Creating a sample list whose elements are lists of integers

    >>> l = [[1, 2], [3, 4, ], [5, 6]]

    Applying flatten function

    >>> flatten(l)
    [1, 2, 3, 4, 5, 6]
    """
    # a single comprehension is O(n) and, unlike reduce(add, l), does not
    # raise TypeError when the outer list is empty
    flat = [item for sub in l for item in sub]
    if not unique:
        return flat
    return list(set(flat))
"""
Apply smoothing to rate computation
[Longer Description]
Author(s):
Myunghwa Hwang mhwang4@gmail.com
David Folch dfolch@asu.edu
Luc Anselin luc.anselin@asu.edu
Serge Rey srey@asu.edu
"""
__author__ = "Myunghwa Hwang <mhwang4@gmail.com>, David Folch <dfolch@asu.edu>, Luc Anselin <luc.anselin@asu.edu>, Serge Rey <srey@asu.edu"
from libpysal.weights.weights import W
from libpysal.weights.distance import Kernel
from libpysal.weights.util import get_points_array, comb
from libpysal.cg import Point, Ray, LineSegment
from libpysal.cg import get_angle_between, get_points_dist, get_segment_point_dist,\
get_point_at_angle_and_dist, convex_hull, get_bounding_box
from libpysal.common import np, KDTree, requires as _requires
from libpysal.weights.spatial_lag import lag_spatial as slag
from scipy.stats import gamma, norm, chi2, poisson
from functools import reduce
import doctest
__all__ = ['Excess_Risk', 'Empirical_Bayes', 'Spatial_Empirical_Bayes', 'Spatial_Rate', 'Kernel_Smoother', 'Age_Adjusted_Smoother', 'Disk_Smoother', 'Spatial_Median_Rate', 'Spatial_Filtering', 'Headbanging_Triples', 'Headbanging_Median_Rate', 'flatten', 'weighted_median', 'sum_by_n', 'crude_age_standardization', 'direct_age_standardization', 'indirect_age_standardization', 'standardized_mortality_ratio', 'choynowski', 'assuncao_rate']
def weighted_median(d, w):
    """Find the weighted median of d based on weights w.

    Parameters
    ----------
    d : array
        (n, 1), variable for which median will be found
    w : array
        (n, 1), weights deciding d's median; aligned with d

    Returns
    -------
    float
        weighted median of d

    Examples
    --------
    >>> d = np.array([5, 4, 3, 1, 2])
    >>> w = np.array([10, 22, 9, 2, 5])
    >>> weighted_median(d, w)
    4
    """
    # pair each value with its weight so both reorder together, then sort
    # the pairs by the value field
    pair_dtype = [('w', '%s' % w.dtype), ('v', '%s' % d.dtype)]
    pairs = np.array(list(zip(w, d)), dtype=pair_dtype)
    pairs.sort(order='v')
    # cumulative weight in value order; the median sits where the running
    # total first reaches half of the overall weight
    cum_w = pairs['w'].cumsum()
    half_total = cum_w[-1] * 1.0 / 2
    idx = (cum_w >= half_total).nonzero()[0][0]
    sorted_d = np.sort(d)
    # if the half-total is hit exactly, average the two straddling values
    exact_split = cum_w[idx] == half_total
    if exact_split and idx < len(d) - 1:
        return sorted_d[idx:idx + 2].mean()
    return sorted_d[idx]
def sum_by_n(d, w, n):
    """Summarize a data array into n values after weighting by w.

    Parameters
    ----------
    d : array
        (t, 1), numerical values
    w : array
        (t, 1), numerical values for weighting
    n : integer
        the number of groups; t = c*n for a constant c

    Returns
    -------
    : array
        (n, 1), an array with summarized values

    Examples
    --------
    >>> d = np.array([10, 9, 20, 30])
    >>> w = np.array([0.5, 0.1, 0.3, 0.8])
    >>> sum_by_n(d, w, 2)
    array([ 5.9, 30. ])
    """
    total = len(d)
    size = total // n  # group length; assumes len(d) is a multiple of n
    weighted = d * w
    return np.array([weighted[start:start + size].sum()
                     for start in range(0, total, size)])
def crude_age_standardization(e, b, n):
    """Compute rates through crude age standardization.

    Parameters
    ----------
    e : array
        (n*h, 1), event variable measured for each age group across n
        spatial units
    b : array
        (n*h, 1), population at risk variable measured for each age group
        across n spatial units; aligned with e
    n : integer
        the number of spatial units

    Returns
    -------
    : array
        (n, 1), age standardized rate

    Examples
    --------
    >>> e = np.array([30, 25, 25, 15, 33, 21, 30, 20])
    >>> b = np.array([100, 100, 110, 90, 100, 90, 110, 90])
    >>> crude_age_standardization(e, b, 2)
    array([0.2375    , 0.26666667])
    """
    # age-group-specific rates
    rates = e * 1.0 / b
    pop_totals = sum_by_n(b, 1.0, n)
    # each age group's share of its region's total population
    shares = b * 1.0 / pop_totals.repeat(len(e) // n)
    # population-share-weighted average of the group rates, per region
    return sum_by_n(rates, shares, n)
def direct_age_standardization(e, b, s, n, alpha=0.05):
    """Compute rates through direct age standardization.

    Parameters
    ----------
    e : array
        (n*h, 1), event variable measured for each age group across n
        spatial units
    b : array
        (n*h, 1), population at risk variable measured for each age group
        across n spatial units
    s : array
        (n*h, 1), standard population for each age group across n spatial
        units; e, b, and s are aligned
    n : integer
        the number of spatial units
    alpha : float
        significance level for confidence interval

    Returns
    -------
    list
        a list of n tuples; each tuple has a rate and its lower and upper
        confidence limits

    Examples
    --------
    >>> e = np.array([30, 25, 25, 15, 33, 21, 30, 20])
    >>> b = np.array([1000, 1000, 1100, 900, 1000, 900, 1100, 900])
    >>> s = np.array([1000, 900, 1000, 900, 1000, 900, 1000, 900])
    >>> a, b = [i[0] for i in direct_age_standardization(e, b, s, 2)]
    >>> round(a, 4)
    0.0237
    >>> round(b, 4)
    0.0267
    """
    # per-group weight: standard-population share divided by local population
    age_weight = (1.0 / b) * (s * 1.0 / sum_by_n(s, 1.0, n).repeat(len(s) // n))
    adjusted_r = sum_by_n(e, age_weight, n)
    var_estimate = sum_by_n(e, np.square(age_weight), n)
    # gamma-approximation parameters for the confidence interval
    g_a = np.square(adjusted_r) / var_estimate
    g_b = var_estimate / adjusted_r
    group = len(b) // n
    # largest weight within each region's block of age groups
    k = [age_weight[i:i + group].max() for i in range(0, len(b), group)]
    g_a_k = np.square(adjusted_r + k) / (var_estimate + np.square(k))
    g_b_k = (var_estimate + np.square(k)) / (adjusted_r + k)
    res = []
    for i in range(len(adjusted_r)):
        if adjusted_r[i] == 0:
            # BUG FIX: chi2.ppf requires a degrees-of-freedom argument; the
            # original call omitted it and raised TypeError whenever a
            # region had zero events.  df=2 matches the exact Poisson upper
            # bound for a zero count -- TODO confirm against the intended
            # reference method (Fay & Feuer).
            upper = 0.5 * chi2.ppf(1 - 0.5 * alpha, 2)
            lower = 0.0
        else:
            lower = gamma.ppf(0.5 * alpha, g_a[i], scale=g_b[i])
            upper = gamma.ppf(1 - 0.5 * alpha, g_a_k[i], scale=g_b_k[i])
        res.append((adjusted_r[i], lower, upper))
    return res
def indirect_age_standardization(e, b, s_e, s_b, n, alpha=0.05):
    """Compute rates through indirect age standardization.

    Parameters
    ----------
    e : array
        (n*h, 1), event variable measured for each age group across n
        spatial units
    b : array
        (n*h, 1), population at risk variable measured for each age group
        across n spatial units
    s_e : array
        (n*h, 1), event variable for each age group in a standard
        population
    s_b : array
        (n*h, 1), population variable for each age group in a standard
        population; e, b, s_e, and s_b are aligned
    n : integer
        the number of spatial units
    alpha : float
        significance level for confidence interval

    Returns
    -------
    list
        a list of n tuples; each tuple has a rate and its lower and upper
        confidence limits

    Examples
    --------
    >>> e = np.array([30, 25, 25, 15, 33, 21, 30, 20])
    >>> b = np.array([100, 100, 110, 90, 100, 90, 110, 90])
    >>> s_e = np.array([100, 45, 120, 100, 50, 30, 200, 80])
    >>> s_b = np.array([1000, 900, 1000, 900, 1000, 900, 1000, 900])
    >>> [i[0] for i in indirect_age_standardization(e, b, s_e, s_b, 2)]
    [0.23723821989528798, 0.2610803324099723]
    """
    smr = standardized_mortality_ratio(e, b, s_e, s_b, n)
    # overall rate in the standard population
    std_rate_all = sum(s_e * 1.0) / sum(s_b * 1.0)
    adjusted_r = std_rate_all * smr
    # confidence interval for the SMR on the log scale, back-transformed
    events_by_region = sum_by_n(e, 1.0, n)
    log_smr = np.log(smr)
    log_smr_sd = 1.0 / np.sqrt(events_by_region)
    z_crit = norm.ppf(1 - 0.5 * alpha)
    lower = np.exp(log_smr - z_crit * log_smr_sd) * std_rate_all
    upper = np.exp(log_smr + z_crit * log_smr_sd) * std_rate_all
    return list(zip(adjusted_r, lower, upper))
def standardized_mortality_ratio(e, b, s_e, s_b, n):
    """Compute the standardized mortality ratio (SMR).

    Parameters
    ----------
    e : array
        (n*h, 1), event variable measured for each age group across n
        spatial units
    b : array
        (n*h, 1), population at risk variable measured for each age group
        across n spatial units
    s_e : array
        (n*h, 1), event variable for each age group in a standard
        population
    s_b : array
        (n*h, 1), population variable for each age group in a standard
        population; e, b, s_e, and s_b are aligned
    n : integer
        the number of spatial units

    Returns
    -------
    array
        (n, 1), observed events divided by expected events per region

    Examples
    --------
    >>> e = np.array([30, 25, 25, 15, 33, 21, 30, 20])
    >>> b = np.array([100, 100, 110, 90, 100, 90, 110, 90])
    >>> s_e = np.array([100, 45, 120, 100, 50, 30, 200, 80])
    >>> s_b = np.array([1000, 900, 1000, 900, 1000, 900, 1000, 900])
    >>> a, b = standardized_mortality_ratio(e, b, s_e, s_b, 2)
    >>> round(a, 4)
    2.4869
    >>> round(b, 4)
    2.7368
    """
    # age-specific rates in the standard population
    std_rates = s_e * 1.0 / s_b
    observed = sum_by_n(e, 1.0, n)
    # events expected if each region experienced the standard rates
    expected = sum_by_n(b, std_rates, n)
    return observed * 1.0 / expected
def choynowski(e, b, n, threshold=None):
    """Choynowski map probabilities [Choynowski1959]_ .

    Parameters
    ----------
    e : array(n*h, 1)
        event variable measured for each age group across n spatial units
    b : array(n*h, 1)
        population at risk variable measured for each age group across n
        spatial units; aligned with e
    n : integer
        the number of spatial units
    threshold : float
        Returns zero for any p-value greater than threshold

    Returns
    -------
    : array (nx1)

    Examples
    --------
    >>> e = np.array([30, 25, 25, 15, 33, 21, 30, 20])
    >>> b = np.array([100, 100, 110, 90, 100, 90, 110, 90])
    >>> a, b = choynowski(e, b, 2)
    >>> round(a, 3)
    0.304
    >>> round(b, 3)
    0.294
    """
    observed = sum_by_n(e, 1.0, n)
    pop = sum_by_n(b, 1.0, n)
    overall_rate = sum(observed) * 1.0 / sum(pop)
    expected = overall_rate * pop
    probs = []
    for idx, count in enumerate(observed):
        # lower-tail probability for counts at or below expectation,
        # upper-tail probability otherwise
        if count <= expected[idx]:
            probs.append(poisson.cdf(count, expected[idx]))
        else:
            probs.append(1 - poisson.cdf(count - 1, expected[idx]))
    if threshold:
        probs = [p if p < threshold else 0.0 for p in probs]
    return np.array(probs)
def assuncao_rate(e, b):
    """Standardize rates using the mean and standard deviation of
    Empirical Bayes rate estimates.

    The resulting standardized rates are used to compute Moran's I
    corrected for rate variables.

    Parameters
    ----------
    e : array(n, 1)
        event variable measured at n spatial units
    b : array(n, 1)
        population at risk variable measured at n spatial units; aligned
        with e

    Returns
    -------
    : array (nx1)

    Examples
    --------
    >>> e = np.array([30, 25, 25, 15, 33, 21, 30, 20])
    >>> b = np.array([100, 100, 110, 90, 100, 90, 110, 90])
    >>> assuncao_rate(e, b)[:4]
    array([ 1.03843594, -0.04099089, -0.56250375, -1.73061861])
    """
    rates = e * 1.0 / b
    total_e = sum(e)
    total_b = sum(b)
    # overall (grand) rate across all units
    grand_rate = total_e * 1.0 / total_b
    # population-weighted variance of the raw rates
    weighted_var = sum(b * ((rates - grand_rate) ** 2)) / total_b
    # EB prior variance and per-unit rate variance
    alpha_hat = weighted_var - grand_rate / (float(total_b) / len(e))
    var_i = alpha_hat + grand_rate / b
    return (rates - grand_rate) / np.sqrt(var_i)
class _Smoother(object):
    """
    Helper base class for aspatial smoothers.

    Provides the shared ``by_col`` dataframe helper; concrete smoothers
    only need an ``__init__(e, b, ...)`` that sets an ``r`` attribute.
    """
    def __init__(self):
        pass

    @classmethod
    def by_col(cls, df, e, b, inplace=False, **kwargs):
        """
        Compute smoothing by columns in a dataframe.

        Parameters
        -----------
        df : pandas.DataFrame
            a dataframe containing the data to be smoothed
        e : string or list of strings
            the name or names of columns containing event variables to be
            smoothed
        b : string or list of strings
            the name or names of columns containing the population
            variables to be smoothed
        inplace : bool
            whether to append the derived columns (named
            'column_<smoother>') directly to `df`, or to return a copy
            with the columns attached
        **kwargs: optional keyword arguments
            options passed directly to the smoother

        Returns
        ---------
        a copy of `df` containing the columns, or None (with columns
        added to `df` in place) when `inplace` is True.
        """
        if not inplace:
            frame = df.copy()
            cls.by_col(frame, e, b, inplace=True, **kwargs)
            return frame
        events = [e] if isinstance(e, str) else e
        pops = [b] if isinstance(b, str) else b
        # a single population column may be reused for several events
        if len(pops) == 1 and len(events) > 1:
            pops = pops * len(events)
        try:
            assert len(events) == len(pops)
        except AssertionError:
            raise ValueError('There is no one-to-one mapping between event'
                             ' variable and population at risk variable!')
        for ename, bname in zip(events, pops):
            outcol = '_'.join(('-'.join((ename, bname)), cls.__name__.lower()))
            df[outcol] = cls(df[ename], df[bname], **kwargs).r
class Excess_Risk(_Smoother):
    """Excess Risk.

    The excess risk for a unit is its raw rate e/b divided by the overall
    rate sum(e)/sum(b): values above 1 indicate more events than expected.

    Parameters
    ----------
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units

    Attributes
    ----------
    r : array (n, 1)
        excess risk values

    Examples
    --------
    >>> import libpysal
    >>> stl = libpysal.io.open(libpysal.examples.get_path('stl_hom.csv'), 'r')
    >>> stl_e, stl_b = np.array(stl[:,10]), np.array(stl[:,13])
    >>> er = Excess_Risk(stl_e, stl_b)
    >>> er.r[:3]
    array([[0.20665681],
           [0.43613787],
           [0.42078261]])
    """
    def __init__(self, e, b):
        events = np.asarray(e).reshape(-1, 1)
        pop = np.asarray(b).reshape(-1, 1)
        # overall (expected) rate across all units
        overall_rate = events.sum() * 1.0 / pop.sum()
        # ratio of the observed rate to the overall rate
        self.r = events * 1.0 / (pop * overall_rate)
class Empirical_Bayes(_Smoother):
    """Aspatial Empirical Bayes Smoothing.

    Each raw rate e/b is shrunk toward the overall rate sum(e)/sum(b); the
    amount of shrinkage depends on a method-of-moments estimate of the
    prior variance of the rates.

    Parameters
    ----------
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units

    Attributes
    ----------
    r : array (n, 1)
        rate values from Empirical Bayes Smoothing

    Examples
    --------
    >>> import libpysal
    >>> stl = libpysal.io.open(libpysal.examples.get_path('stl_hom.csv'), 'r')
    >>> stl_e, stl_b = np.array(stl[:,10]), np.array(stl[:,13])
    >>> eb = Empirical_Bayes(stl_e, stl_b)
    >>> eb.r[:3]
    array([[2.36718950e-05],
           [4.54539167e-05],
           [4.78114019e-05]])
    """
    def __init__(self, e, b):
        events = np.asarray(e).reshape(-1, 1)
        pop = np.asarray(b).reshape(-1, 1)
        total_e = events.sum() * 1.0
        total_b = pop.sum() * 1.0
        grand_rate = total_e / total_b
        raw_rate = events * 1.0 / pop
        # method-of-moments estimate of the prior (between-unit) variance
        deviation = raw_rate - grand_rate
        between_var = (pop * deviation * deviation).sum() * 1.0 / total_b
        within_adj = grand_rate * 1.0 / pop.mean()
        prior_var = between_var - within_adj
        # shrinkage weight: closer to 1 (raw rate) when the prior variance
        # dominates, closer to 0 (grand rate) otherwise
        shrink = prior_var / (prior_var + grand_rate / pop)
        self.r = shrink * raw_rate + (1.0 - shrink) * grand_rate
class _Spatial_Smoother(_Smoother):
    """
    Helper base class for spatial smoothers.

    Extends ``_Smoother.by_col`` with resolution of a spatial weights
    object, either passed explicitly or discovered among the dataframe's
    ``_metadata`` attributes.
    """
    def __init__(self):
        pass

    @classmethod
    def by_col(cls, df, e, b, w=None, inplace=False, **kwargs):
        """
        Compute smoothing by columns in a dataframe.

        Parameters
        -----------
        df : pandas.DataFrame
            a dataframe containing the data to be smoothed
        e : string or list of strings
            the name or names of columns containing event variables to be
            smoothed
        b : string or list of strings
            the name or names of columns containing the population
            variables to be smoothed
        w : pysal.weights.W or list of pysal.weights.W
            the spatial weights object or objects to use with the
            event-population pairs. If not provided and a weights object
            is in the dataframe's metadata, that weights object will be
            used.
        inplace : bool
            a flag denoting whether to output a copy of `df` with the
            relevant smoothed columns appended, or to append the columns
            directly to `df` itself.
        **kwargs: optional keyword arguments
            optional keyword options that are passed directly to the
            smoother.

        Returns
        ---------
        a copy of `df` containing the columns. Or, if `inplace`, this returns
        None, but implicitly adds columns to `df`.

        Raises
        ------
        Exception
            if no weights object is provided and none is attached to the
            dataframe's metadata.
        ValueError
            if the event and population column lists cannot be paired
            one-to-one.
        """
        if not inplace:
            new = df.copy()
            cls.by_col(new, e, b, w=w, inplace=True, **kwargs)
            return new
        if isinstance(e, str):
            e = [e]
        if isinstance(b, str):
            b = [b]
        if w is None:
            found = False
            for k in df._metadata:
                # BUG FIX: the original used df.__dict__.get(w, None),
                # looking up the (None-valued) ``w`` instead of the
                # metadata attribute name ``k``, so an attached weights
                # object was never found.
                cand = df.__dict__.get(k, None)
                if isinstance(cand, W):
                    w = cand
                    found = True
                    break  # use the first weights object discovered
            if not found:
                raise Exception('Weights not provided and no weights attached to frame!'
                                ' Please provide a weight or attach a weight to the'
                                ' dataframe')
        if isinstance(w, W):
            w = [w] * len(e)
        # a single population column may be reused for several events
        if len(b) == 1 and len(e) > 1:
            b = b * len(e)
        try:
            assert len(e) == len(b)
        except AssertionError:
            raise ValueError('There is no one-to-one mapping between event'
                             ' variable and population at risk variable!')
        for ei, bi, wi in zip(e, b, w):
            ename = ei
            bname = bi
            ei = df[ename]
            bi = df[bname]
            outcol = '_'.join(('-'.join((ename, bname)), cls.__name__.lower()))
            df[outcol] = cls(ei, bi, w=wi, **kwargs).r
class Spatial_Empirical_Bayes(_Spatial_Smoother):
    """Spatial Empirical Bayes Smoothing

    Each raw rate e/b is shrunk toward a local (spatially smoothed)
    reference rate, with the shrinkage governed by a locally estimated
    prior variance.

    Parameters
    ----------
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units
    w : spatial weights instance
        ``w.id_order`` must be set so w aligns with e and b

    Attributes
    ----------
    r : array (n, 1)
        rate values from Empirical Bayes Smoothing

    Examples
    --------
    >>> import libpysal
    >>> stl = libpysal.io.open(libpysal.examples.get_path('stl_hom.csv'), 'r')
    >>> stl_e, stl_b = np.array(stl[:,10]), np.array(stl[:,13])
    >>> stl_w = libpysal.io.open(libpysal.examples.get_path('stl.gal'), 'r').read()
    >>> if not stl_w.id_order_set: stl_w.id_order = range(1,len(stl) + 1)
    >>> from esda.smoothing import Spatial_Empirical_Bayes
    >>> s_eb = Spatial_Empirical_Bayes(stl_e, stl_b, stl_w)
    >>> s_eb.r[:3]
    array([[4.01485749e-05],
           [3.62437513e-05],
           [4.93034844e-05]])
    """
    def __init__(self, e, b, w):
        # id_order fixes the alignment between the weights and e/b
        if not w.id_order_set:
            raise ValueError("w id_order must be set to align with the order of e an b")
        e = np.asarray(e).reshape(-1,1)
        b = np.asarray(b).reshape(-1,1)
        # local reference rate: spatially smoothed rate around each unit
        r_mean = Spatial_Rate(e, b, w).r
        rate = e * 1.0 / b
        r_var_left = np.ones_like(e) * 1.
        ngh_num = np.ones_like(e)
        # population of each unit plus its neighbors
        bi = slag(w, b) + b
        for i, idv in enumerate(w.id_order):
            # the neighborhood includes the unit itself
            ngh = list(w[idv].keys()) + [idv]
            nghi = [w.id2i[k] for k in ngh]
            ngh_num[i] = len(nghi)
            # population-weighted squared deviation from the local mean
            v = sum(np.square(rate[nghi] - r_mean[i]) * b[nghi])
            r_var_left[i] = v
        r_var_left = r_var_left / bi
        r_var_right = r_mean / (bi / ngh_num)
        # local prior variance estimate, floored at zero
        r_var = r_var_left - r_var_right
        r_var[r_var < 0] = 0.0
        # shrink the raw rate toward the local mean in proportion to the
        # local prior variance
        self.r = r_mean + (rate - r_mean) * (r_var / (r_var + (r_mean / b)))
class Spatial_Rate(_Spatial_Smoother):
    """Spatial Rate Smoothing
    Parameters
    ----------
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units
    w : spatial weights instance
    Attributes
    ----------
    r : array (n, 1)
        rate values from spatial rate smoothing
    Examples
    --------
    Reading data in stl_hom.csv into stl to extract values
    for event and population-at-risk variables
    >>> import libpysal
    >>> stl = libpysal.io.open(libpysal.examples.get_path('stl_hom.csv'), 'r')
    The 11th and 14th columns in stl_hom.csv includes the number of homocides and population.
    Creating two arrays from these columns.
    >>> stl_e, stl_b = np.array(stl[:,10]), np.array(stl[:,13])
    Creating a spatial weights instance by reading in stl.gal file.
    >>> stl_w = libpysal.io.open(libpysal.examples.get_path('stl.gal'), 'r').read()
    Ensuring that the elements in the spatial weights instance are ordered
    by the given sequential numbers from 1 to the number of observations in stl_hom.csv
    >>> if not stl_w.id_order_set: stl_w.id_order = range(1,len(stl) + 1)
    Creating an instance of Spatial_Rate class using stl_e, stl_b, and stl_w
    >>> from esda.smoothing import Spatial_Rate
    >>> sr = Spatial_Rate(stl_e,stl_b,stl_w)
    Extracting the risk values through the property r of sr
    >>> sr.r[:10]
    array([[4.59326407e-05],
           [3.62437513e-05],
           [4.98677081e-05],
           [5.09387329e-05],
           [3.72735210e-05],
           [4.01073093e-05],
           [3.79372794e-05],
           [3.27019246e-05],
           [4.26204928e-05],
           [3.79372794e-05]])
    """
    def __init__(self, e, b, w):
        # w.id_order must align rows of e/b with the weights' observations
        if not w.id_order_set:
            raise ValueError("w id_order must be set to align with the order of e and b")
        else:
            e = np.asarray(e).reshape(-1,1)
            b = np.asarray(b).reshape(-1,1)
            # switch to binary weights so the lag is a plain neighborhood sum
            w.transform = 'b'
            w_e, w_b = slag(w, e), slag(w, b)
            # pooled rate: (own + neighbors' events) / (own + neighbors' population)
            self.r = (e + w_e) / (b + w_b)
            # restore the caller's original transform (order matters: this
            # must happen after both lags are computed)
            w.transform = 'o'
class Kernel_Smoother(_Spatial_Smoother):
    """Kernel smoothing
    Parameters
    ----------
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units
    w : Kernel weights instance
    Attributes
    ----------
    r : array (n, 1)
        rate values from spatial rate smoothing
    Raises
    ------
    ValueError
        if ``w`` is not a Kernel weights instance or its id_order is unset
    Examples
    --------
    Creating an array including event values for 6 regions
    >>> e = np.array([10, 1, 3, 4, 2, 5])
    Creating another array including population-at-risk values for the 6 regions
    >>> b = np.array([100, 15, 20, 20, 80, 90])
    Creating a list containing geographic coordinates of the 6 regions' centroids
    >>> points=[(10, 10), (20, 10), (40, 10), (15, 20), (30, 20), (30, 30)]
    Creating a kernel-based spatial weights instance by using the above points
    >>> kw=Kernel(points)
    Ensuring that the elements in the kernel-based weights are ordered
    by the given sequential numbers from 0 to 5
    >>> if not kw.id_order_set: kw.id_order = range(0,len(points))
    Applying kernel smoothing to e and b
    >>> kr = Kernel_Smoother(e, b, kw)
    Extracting the smoothed rates through the property r of the Kernel_Smoother instance
    >>> kr.r
    array([[0.10543301],
           [0.0858573 ],
           [0.08256196],
           [0.09884584],
           [0.04756872],
           [0.04845298]])
    """
    def __init__(self, e, b, w):
        # BUG FIX: this previously did ``raise Error(...)`` but ``Error`` is
        # not defined anywhere, so a non-Kernel ``w`` produced a NameError
        # instead of a useful message.  ValueError matches the validation
        # style of the sibling smoothers; isinstance also accepts subclasses.
        if not isinstance(w, Kernel):
            raise ValueError('w must be an instance of Kernel weights')
        if not w.id_order_set:
            raise ValueError("w id_order must be set to align with the order of e and b")
        e = np.asarray(e).reshape(-1, 1)
        b = np.asarray(b).reshape(-1, 1)
        # kernel-weighted events divided by kernel-weighted population
        w_e, w_b = slag(w, e), slag(w, b)
        self.r = w_e / w_b
class Age_Adjusted_Smoother(_Spatial_Smoother):
    """Age-adjusted rate smoothing
    Parameters
    ----------
    e : array (n*h, 1)
        event variable measured for each age group across n spatial units
    b : array (n*h, 1)
        population at risk variable measured for each age group across n spatial units
    w : spatial weights instance
    s : array (n*h, 1)
        standard population for each age group across n spatial units
    Attributes
    ----------
    r : array (n, 1)
        rate values from spatial rate smoothing
    Notes
    -----
    Weights used to smooth age-specific events and populations are simple binary weights
    Examples
    --------
    Creating an array including 12 values for the 6 regions with 2 age groups
    >>> e = np.array([10, 8, 1, 4, 3, 5, 4, 3, 2, 1, 5, 3])
    Creating another array including 12 population-at-risk values for the 6 regions
    >>> b = np.array([100, 90, 15, 30, 25, 20, 30, 20, 80, 80, 90, 60])
    For age adjustment, we need another array of values containing standard population
    s includes standard population data for the 6 regions
    >>> s = np.array([98, 88, 15, 29, 20, 23, 33, 25, 76, 80, 89, 66])
    Creating a list containing geographic coordinates of the 6 regions' centroids
    >>> points=[(10, 10), (20, 10), (40, 10), (15, 20), (30, 20), (30, 30)]
    Creating a kernel-based spatial weights instance by using the above points
    >>> kw=Kernel(points)
    Ensuring that the elements in the kernel-based weights are ordered
    by the given sequential numbers from 0 to 5
    >>> if not kw.id_order_set: kw.id_order = range(0,len(points))
    Applying age-adjusted smoothing to e and b
    >>> ar = Age_Adjusted_Smoother(e, b, kw, s)
    Extracting the smoothed rates through the property r of the Age_Adjusted_Smoother instance
    >>> ar.r
    array([0.10519625, 0.08494318, 0.06440072, 0.06898604, 0.06952076,
           0.05020968])
    """
    def __init__(self, e, b, w, s, alpha=0.05):
        e = np.asarray(e).reshape(-1, 1)
        b = np.asarray(b).reshape(-1, 1)
        s = np.asarray(s).flatten()
        t = len(e)
        # h age groups per spatial unit; rows are interleaved so e[i::h]
        # selects age group i for every unit
        h = t // w.n
        w.transform = 'b'
        e_n, b_n = [], []
        # spatially lag each age group separately with binary weights
        for i in range(h):
            e_n.append(slag(w, e[i::h]).tolist())
            b_n.append(slag(w, b[i::h]).tolist())
        # interleave the lagged groups back into the original row order
        e_n = np.array(e_n).reshape((1, t), order='F')[0]
        b_n = np.array(b_n).reshape((1, t), order='F')[0]
        e_n = e_n.reshape(s.shape)
        b_n = b_n.reshape(s.shape)
        r = direct_age_standardization(e_n, b_n, s, w.n, alpha=alpha)
        # keep the point estimate, drop the confidence interval
        self.r = np.array([i[0] for i in r])
        # restore the caller's original transform
        w.transform = 'o'
    @_requires('pandas')
    @classmethod
    def by_col(cls, df, e, b, w=None, s=None, **kwargs):
        """
        Compute smoothing by columns in a dataframe.
        Parameters
        -----------
        df : pandas.DataFrame
            a dataframe containing the data to be smoothed
        e : string or list of strings
            the name or names of columns containing event variables to be
            smoothed
        b : string or list of strings
            the name or names of columns containing the population
            variables to be smoothed
        w : pysal.weights.W or list of pysal.weights.W
            the spatial weights object or objects to use with the
            event-population pairs. If not provided and a weights object
            is in the dataframe's metadata, that weights object will be
            used.
        s : string or list of strings
            the name or names of columns to use as a standard population
            variable for the events `e` and at-risk populations `b`.
        **kwargs: optional keyword arguments
            optional keyword options that are passed directly to the
            smoother.
        Returns
        ---------
        a new dataframe containing one column of smoothed values per
        event/population pair.
        """
        if s is None:
            raise Exception('Standard population variable "s" must be supplied.')
        import pandas as pd
        if isinstance(e, str):
            e = [e]
        if isinstance(b, str):
            b = [b]
        if isinstance(s, str):
            s = [s]
        if w is None:
            found = False
            for k in df._metadata:
                # BUG FIX: previously ``df.__dict__.get(w, None)`` looked up
                # the variable ``w`` (always None here) instead of the
                # metadata key ``k``, so an attached weights object could
                # never be found.
                w = df.__dict__.get(k, None)
                if isinstance(w, W):
                    found = True
                    break
            if not found:
                raise Exception('Weights not provided and no weights attached to frame!'
                                ' Please provide a weight or attach a weight to the'
                                ' dataframe.')
        if isinstance(w, W):
            w = [w] * len(e)
        if not all(isinstance(wi, W) for wi in w):
            raise Exception('Weights object must be an instance of '
                            ' libpysal.weights.W!')
        # broadcast single population / standard-population columns
        b = b * len(e) if len(b) == 1 and len(e) > 1 else b
        s = s * len(e) if len(s) == 1 and len(e) > 1 else s
        try:
            assert len(e) == len(b)
            assert len(e) == len(s)
            assert len(e) == len(w)
        except AssertionError:
            raise ValueError('There is no one-to-one mapping between event'
                             ' variable and population at risk variable, and '
                             ' standard population variable, and spatial '
                             ' weights!')
        rdf = []
        max_len = 0
        for ei, bi, wi, si in zip(e, b, w, s):
            outcol = '_'.join(('-'.join((ei, bi)), cls.__name__.lower()))
            this_r = cls(df[ei], df[bi], w=wi, s=df[si], **kwargs).r
            # BUG FIX: ``max_len`` was previously reset to 0 whenever a
            # longer result appeared, so padding below was always a no-op.
            max_len = max(max_len, len(this_r))
            rdf.append((outcol, this_r.tolist()))
        # pad shorter columns with None so every column has max_len rows
        padded = (vals + [None] * (max_len - len(vals)) for _, vals in rdf)
        rdf = list(zip((name for name, _ in rdf), padded))
        # DataFrame.from_items was removed in pandas 1.0; a plain dict
        # preserves insertion order on Python 3.7+.
        rdf = pd.DataFrame(dict(rdf))
        return rdf
class Disk_Smoother(_Spatial_Smoother):
    """Disk smoothing (locally weighted averages).

    Each observation's smoothed value is the weighted mean of the crude
    rates over its neighborhood, using the weights stored in ``w``.

    Parameters
    ----------
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units
    w : spatial weights matrix

    Attributes
    ----------
    r : array (n, 1)
        rate values from disk smoothing

    Examples
    --------
    Read events and populations at risk from stl_hom.csv
    >>> import libpysal
    >>> stl = libpysal.io.open(libpysal.examples.get_path('stl_hom.csv'), 'r')
    >>> stl_e, stl_b = np.array(stl[:,10]), np.array(stl[:,13])
    Build a spatial weights instance from stl.gal and align its id order
    >>> stl_w = libpysal.io.open(libpysal.examples.get_path('stl.gal'), 'r').read()
    >>> if not stl_w.id_order_set: stl_w.id_order = range(1,len(stl) + 1)
    Apply disk smoothing and inspect the first ten rates
    >>> sr = Disk_Smoother(stl_e,stl_b,stl_w)
    >>> sr.r[:10]
    array([[4.56502262e-05],
           [3.44027685e-05],
           [3.38280487e-05],
           [4.78530468e-05],
           [3.12278573e-05],
           [2.22596997e-05],
           [2.67074856e-05],
           [2.36924573e-05],
           [3.48801587e-05],
           [3.09511832e-05]])
    """
    def __init__(self, e, b, w):
        if not w.id_order_set:
            raise ValueError(
                "w id_order must be set to align with the order of e and b")
        events = np.asarray(e).reshape(-1, 1)
        population = np.asarray(b).reshape(-1, 1)
        crude = events * 1.0 / population
        # total neighborhood weight per observation, in id_order
        totals = np.array(
            [sum(w.weights[idx]) for idx in w.id_order]).reshape(-1, 1)
        # weighted sum of neighbors' crude rates, normalized by total weight
        self.r = slag(w, crude) / totals
class Spatial_Median_Rate(_Spatial_Smoother):
    """Spatial Median Rate Smoothing
    Parameters
    ----------
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units
    w : spatial weights instance
    aw : array (n, 1)
        auxiliary weight variable measured across n spatial units
    iteration : integer
        the number of iterations
    Attributes
    ----------
    r : array (n, 1)
        rate values from spatial median rate smoothing
    w : spatial weights instance
    aw : array (n, 1)
        auxiliary weight variable measured across n spatial units
    Examples
    --------
    Reading data in stl_hom.csv into stl to extract values
    for event and population-at-risk variables
    >>> import libpysal
    >>> stl = libpysal.io.open(libpysal.examples.get_path('stl_hom.csv'), 'r')
    The 11th and 14th columns in stl_hom.csv includes the number of homocides and population.
    Creating two arrays from these columns.
    >>> stl_e, stl_b = np.array(stl[:,10]), np.array(stl[:,13])
    Creating a spatial weights instance by reading in stl.gal file.
    >>> stl_w = libpysal.io.open(libpysal.examples.get_path('stl.gal'), 'r').read()
    Ensuring that the elements in the spatial weights instance are ordered
    by the given sequential numbers from 1 to the number of observations in stl_hom.csv
    >>> if not stl_w.id_order_set: stl_w.id_order = range(1,len(stl) + 1)
    Computing spatial median rates without iteration
    >>> smr0 = Spatial_Median_Rate(stl_e,stl_b,stl_w)
    Extracting the computed rates through the property r of the Spatial_Median_Rate instance
    >>> smr0.r[:10]
    array([3.96047383e-05, 3.55386859e-05, 3.28308921e-05, 4.30731238e-05,
           3.12453969e-05, 1.97300409e-05, 3.10159267e-05, 2.19279204e-05,
           2.93763432e-05, 2.93763432e-05])
    Recomputing spatial median rates with 5 iterations
    >>> smr1 = Spatial_Median_Rate(stl_e,stl_b,stl_w,iteration=5)
    Extracting the computed rates through the property r of the Spatial_Median_Rate instance
    >>> smr1.r[:10]
    array([3.11293620e-05, 2.95956330e-05, 3.11293620e-05, 3.10159267e-05,
           2.98436066e-05, 2.76406686e-05, 3.10159267e-05, 2.94788171e-05,
           2.99460806e-05, 2.96981070e-05])
    Computing spatial median rates by using the base variable as auxilliary weights
    without iteration
    >>> smr2 = Spatial_Median_Rate(stl_e,stl_b,stl_w,aw=stl_b)
    Extracting the computed rates through the property r of the Spatial_Median_Rate instance
    >>> smr2.r[:10]
    array([5.77412020e-05, 4.46449551e-05, 5.77412020e-05, 5.77412020e-05,
           4.46449551e-05, 3.61363528e-05, 3.61363528e-05, 4.46449551e-05,
           5.77412020e-05, 4.03987355e-05])
    Recomputing spatial median rates by using the base variable as auxilliary weights
    with 5 iterations
    >>> smr3 = Spatial_Median_Rate(stl_e,stl_b,stl_w,aw=stl_b,iteration=5)
    Extracting the computed rates through the property r of the Spatial_Median_Rate instance
    >>> smr3.r[:10]
    array([3.61363528e-05, 4.46449551e-05, 3.61363528e-05, 3.61363528e-05,
           4.46449551e-05, 3.61363528e-05, 3.61363528e-05, 4.46449551e-05,
           3.61363528e-05, 4.46449551e-05])
    >>>
    """
    def __init__(self, e, b, w, aw=None, iteration=1):
        if not w.id_order_set:
            raise ValueError("w id_order must be set to align with the order of e and b")
        e = np.asarray(e).flatten()
        b = np.asarray(b).flatten()
        # start from the crude rates and refine them in place
        self.r = e * 1.0 / b
        self.aw, self.w = aw, w
        # each pass replaces every rate with its neighborhood median
        while iteration:
            self.__search_median()
            iteration -= 1
    def __search_median(self):
        """Replace each rate by the (weighted) median over its disk.

        The disk is the observation itself plus its neighbors; when ``aw``
        is given, the median is weighted by those auxiliary weights.
        """
        r, aw, w = self.r, self.aw, self.w
        new_r = []
        if self.aw is None:
            for i, id in enumerate(w.id_order):
                # unweighted: plain median of self + neighbors
                r_disk = np.append(r[i], r[w.neighbor_offsets[id]])
                new_r.append(np.median(r_disk))
            else:
            for i, id in enumerate(w.id_order):
                # weighted: median of self + neighbors under aw
                id_d = [i] + list(w.neighbor_offsets[id])
                aw_d, r_d = aw[id_d], r[id_d]
                new_r.append(weighted_median(r_d, aw_d))
        self.r = np.asarray(new_r).reshape(r.shape)
class Spatial_Filtering(_Smoother):
    """Spatial Filtering
    Parameters
    ----------
    bbox : a list of two lists where each list is a pair of coordinates
        a bounding box for the entire n spatial units
    data : array (n, 2)
        x, y coordinates
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units
    x_grid : integer
        the number of cells on x axis
    y_grid : integer
        the number of cells on y axis
    r : float
        fixed radius of a moving window
    pop : integer
        population threshold to create adaptive moving windows
    Attributes
    ----------
    grid : array (x_grid*y_grid, 2)
        x, y coordinates for grid points
    r : array (x_grid*y_grid, 1)
        rate values for grid points
    Notes
    -----
    No tool is provided to find an optimal value for r or pop.
    Examples
    --------
    Reading data in stl_hom.csv into stl to extract values
    for event and population-at-risk variables
    >>> import libpysal
    >>> stl = libpysal.io.open(libpysal.examples.get_path('stl_hom.csv'), 'r')
    Reading the stl data in the WKT format so that
    we can easily extract polygon centroids
    >>> from libpysal.io.util.wkt import WKTParser
    >>> fromWKT = WKTParser()
    >>> stl.cast('WKT',fromWKT)
    Extracting polygon centroids through iteration
    >>> d = np.array([i.centroid for i in stl[:,0]])
    Specifying the bounding box for the stl_hom data.
    The bbox should includes two points for the left-bottom and the right-top corners
    >>> bbox = [[-92.700676, 36.881809], [-87.916573, 40.3295669]]
    The 11th and 14th columns in stl_hom.csv includes the number of homocides and population.
    Creating two arrays from these columns.
    >>> stl_e, stl_b = np.array(stl[:,10]), np.array(stl[:,13])
    Applying spatial filtering by using a 10*10 mesh grid and a moving window
    with 2 radius
    >>> sf_0 = Spatial_Filtering(bbox,d,stl_e,stl_b,10,10,r=2)
    Extracting the resulting rates through the property r of the Spatial_Filtering instance
    >>> sf_0.r[:10]
    array([4.23561763e-05, 4.45290850e-05, 4.56456221e-05, 4.49133384e-05,
           4.39671835e-05, 4.44903042e-05, 4.19845497e-05, 4.11936548e-05,
           3.93463504e-05, 4.04376345e-05])
    Applying another spatial filtering by allowing the moving window to grow until
    600000 people are found in the window
    >>> sf = Spatial_Filtering(bbox,d,stl_e,stl_b,10,10,pop=600000)
    Checking the size of the reulting array including the rates
    >>> sf.r.shape
    (100,)
    Extracting the resulting rates through the property r of the Spatial_Filtering instance
    >>> sf.r[:10]
    array([3.73728738e-05, 4.04456300e-05, 4.04456300e-05, 3.81035327e-05,
           4.54831940e-05, 4.54831940e-05, 3.75658628e-05, 3.75658628e-05,
           3.75658628e-05, 3.75658628e-05])
    """
    def __init__(self, bbox, data, e, b, x_grid, y_grid, r=None, pop=None):
        e = np.asarray(e).reshape(-1, 1)
        b = np.asarray(b).reshape(-1, 1)
        data_tree = KDTree(data)
        x_range = bbox[1][0] - bbox[0][0]
        y_range = bbox[1][1] - bbox[0][1]
        # regular mesh of x_grid * y_grid evaluation points over the bbox
        x, y = np.mgrid[bbox[0][0]:bbox[1][0]:float(x_range) / x_grid,
                        bbox[0][1]:bbox[1][1]:float(y_range) / y_grid]
        self.grid = list(zip(x.ravel(), y.ravel()))
        self.r = []
        if r is None and pop is None:
            raise ValueError("Either r or pop should not be None")
        if r is not None:
            # fixed-radius window: pool all observations within distance r
            pnts_in_disk = data_tree.query_ball_point(self.grid, r=r)
            for i in pnts_in_disk:
                # BUG FIX: the loop previously rebound the radius parameter
                # ``r``; a distinct name avoids the shadowing.
                disk_rate = e[i].sum() * 1.0 / b[i].sum()
                self.r.append(disk_rate)
        if pop is not None:
            # adaptive window: take nearest points until the cumulative
            # population would exceed ``pop``
            nearest = data_tree.query(self.grid, k=len(e))[1]
            for i in nearest:
                e_n, b_n = e[i].cumsum(), b[i].cumsum()
                b_n_filter = b_n <= pop
                e_n_f, b_n_f = e_n[b_n_filter], b_n[b_n_filter]
                if len(e_n_f) == 0:
                    # the single nearest point already exceeds pop: use it
                    e_n_f = e_n[[0]]
                    b_n_f = b_n[[0]]
                self.r.append(e_n_f[-1] * 1.0 / b_n_f[-1])
        # NOTE(review): if both r and pop are given, both sets of rates are
        # appended (2 * x_grid * y_grid values) — preserved from the
        # original behavior; confirm whether callers rely on it.
        self.r = np.array(self.r)
    @_requires('pandas')
    @classmethod
    def by_col(cls, df, e, b, x_grid, y_grid, geom_col='geometry', **kwargs):
        """
        Compute smoothing by columns in a dataframe. The bounding box and point
        information is computed from the geometry column.
        Parameters
        -----------
        df : pandas.DataFrame
            a dataframe containing the data to be smoothed
        e : string or list of strings
            the name or names of columns containing event variables to be
            smoothed
        b : string or list of strings
            the name or names of columns containing the population
            variables to be smoothed
        x_grid : integer
            number of grid cells to use along the x-axis
        y_grid : integer
            number of grid cells to use along the y-axis
        geom_col: string
            the name of the column in the dataframe containing the
            geometry information.
        **kwargs: optional keyword arguments
            optional keyword options that are passed directly to the
            smoother.
        Returns
        ---------
        a new dataframe of dimension (x_grid*y_grid, 3), containing the
        coordinates of the grid cells and the rates associated with those grid
        cells.
        """
        import pandas as pd
        # prep for application over multiple event/population pairs
        if isinstance(e, str):
            e = [e]
        if isinstance(b, str):
            b = [b]
        if len(e) > len(b):
            b = b * len(e)
        if isinstance(x_grid, (int, float)):
            x_grid = [x_grid] * len(e)
        if isinstance(y_grid, (int, float)):
            y_grid = [y_grid] * len(e)
        bbox = get_bounding_box(df[geom_col])
        bbox = [[bbox.left, bbox.lower], [bbox.right, bbox.upper]]
        data = get_points_array(df[geom_col])
        res = []
        for ename, bname, xgi, ygi in zip(e, b, x_grid, y_grid):
            r = cls(bbox, data, df[ename], df[bname], xgi, ygi, **kwargs)
            grid = np.asarray(r.grid).reshape(-1, 2)
            name = '_'.join(('-'.join((ename, bname)), cls.__name__.lower()))
            colnames = ('_'.join((name, suffix)) for suffix in ['X', 'Y', 'R'])
            items = [(cname, col) for cname, col in
                     zip(colnames, [grid[:, 0], grid[:, 1], r.r])]
            # DataFrame.from_items was removed in pandas 1.0; a plain dict
            # preserves insertion order on Python 3.7+.
            res.append(pd.DataFrame(dict(items)))
        outdf = pd.concat(res)
        return outdf
class Headbanging_Triples(object):
    """Generate a pseudo spatial weights instance that contains headbanging triples
    Parameters
    ----------
    data : array (n, 2)
        numpy array of x, y coordinates
    w : spatial weights instance
    k : integer number of nearest neighbors
    t : integer
        the number of triples
    angle : integer between 0 and 180
        the angle criterium for a set of triples
    edgecorr : boolean
        whether or not correction for edge points is made
    Attributes
    ----------
    triples : dictionary
        key is observation record id, value is a list of lists of triple ids
    extra : dictionary
        key is observation record id, value is a list of the following:
        tuple of original triple observations
        distance between original triple observations
        distance between an original triple observation and its extrapolated point
    Examples
    --------
    importing k-nearest neighbor weights creator
    >>> import libpysal # doctest: +SKIP
    Reading data in stl_hom.csv into stl_db to extract values
    for event and population-at-risk variables
    >>> stl_db = libpysal.io.open(libpysal.examples.get_path('stl_hom.csv'),'r') # doctest: +SKIP
    Reading the stl data in the WKT format so that
    we can easily extract polygon centroids
    >>> from libpysal.io.util.wkt import WKTParser # doctest: +SKIP
    >>> fromWKT = WKTParser() # doctest: +SKIP
    >>> stl_db.cast('WKT',fromWKT) # doctest: +SKIP
    Extracting polygon centroids through iteration
    >>> d = np.array([i.centroid for i in stl_db[:,0]]) # doctest: +SKIP
    Using the centroids, we create a 5-nearst neighbor weights
    >>> w = libpysal.weights.KNN(d,k=5) # doctest: +SKIP
    Ensuring that the elements in the spatial weights instance are ordered
    by the order of stl_db's IDs
    >>> if not w.id_order_set: w.id_order = w.id_order # doctest: +SKIP
    Finding headbaning triples by using 5 nearest neighbors
    >>> ht = Headbanging_Triples(d,w,k=5) # doctest: +SKIP
    Checking the members of triples
    >>> for k, item in ht.triples.items()[:5]: print(k, item) # doctest: +SKIP
    0 [(5, 6), (10, 6)]
    1 [(4, 7), (4, 14), (9, 7)]
    2 [(0, 8), (10, 3), (0, 6)]
    3 [(4, 2), (2, 12), (8, 4)]
    4 [(8, 1), (12, 1), (8, 9)]
    Opening sids2.shp file
    >>> import libpysal
    >>> sids = libpysal.io.open(libpysal.examples.get_path('sids2.shp'),'r') # doctest: +SKIP
    Extracting the centroids of polygons in the sids data
    >>> sids_d = np.array([i.centroid for i in sids]) # doctest: +SKIP
    Creating a 5-nearest neighbors weights from the sids centroids
    >>> sids_w = libpysal.weights.KNN(sids_d,k=5) # doctest: +SKIP
    Ensuring that the members in sids_w are ordered by
    the order of sids_d's ID
    >>> if not sids_w.id_order_set: sids_w.id_order = sids_w.id_order # doctest: +SKIP
    Finding headbaning triples by using 5 nearest neighbors
    >>> s_ht = Headbanging_Triples(sids_d,sids_w,k=5) # doctest: +SKIP
    Checking the members of the found triples
    >>> for k, item in s_ht.triples.items()[:5]: print(k, item) # doctest: +SKIP
    0 [(1, 18), (1, 21), (1, 33)]
    1 [(2, 40), (2, 22), (22, 40)]
    2 [(39, 22), (1, 9), (39, 17)]
    3 [(16, 6), (19, 6), (20, 6)]
    4 [(5, 15), (27, 15), (35, 15)]
    Finding headbanging triples by using 5 nearest neighbors with edge correction
    >>> s_ht2 = Headbanging_Triples(sids_d,sids_w,k=5,edgecor=True) # doctest: +SKIP
    Checking the members of the found triples
    >>> for k, item in s_ht2.triples.items()[:5]: print(k, item) # doctest: +SKIP
    0 [(1, 18), (1, 21), (1, 33)]
    1 [(2, 40), (2, 22), (22, 40)]
    2 [(39, 22), (1, 9), (39, 17)]
    3 [(16, 6), (19, 6), (20, 6)]
    4 [(5, 15), (27, 15), (35, 15)]
    Checking the extrapolated point that is introduced into the triples
    during edge correction
    >>> extrapolated = s_ht2.extra[72] # doctest: +SKIP
    Checking the observation IDs constituting the extrapolated triple
    >>> extrapolated[0] # doctest: +SKIP
    (89, 77)
    Checking the distances between the extraploated point and the observation 89 and 77
    >>> round(extrapolated[1],5), round(extrapolated[2],6) # doctest: +SKIP
    (0.33753, 0.302707)
    """
    def __init__(self, data, w, k=5, t=3, angle=135.0, edgecor=False):
        # NOTE: this class is deprecated; the unconditional raise below
        # makes every statement after it unreachable (dead code kept for
        # reference).
        raise DeprecationWarning('Deprecated')
        if k < 3:
            raise ValueError("w should be NeareastNeighbors instance & the number of neighbors should be more than 3.")
        if not w.id_order_set:
            raise ValueError("w id_order must be set to align with the order of data")
        self.triples, points = {}, {}
        # map (index, Point) -> {neighbor index: neighbor Point}
        for i, pnt in enumerate(data):
            ng = w.neighbor_offsets[i]
            points[(i, Point(pnt))] = dict(list(zip(ng, [Point(d)
                                                         for d in data[ng]])))
        for i, pnt in list(points.keys()):
            ng = points[(i, pnt)]
            tr, tr_dis = {}, []
            # keep neighbor pairs whose angle at pnt exceeds the criterium
            for c in comb(list(ng.keys()), 2):
                p2, p3 = ng[c[0]], ng[c[-1]]
                ang = get_angle_between(Ray(pnt, p2), Ray(pnt, p3))
                if ang > angle or (ang < 0.0 and ang + 360 > angle):
                    tr[tuple(c)] = (p2, p3)
            if len(tr) > t:
                # too many candidates: keep the t triples whose segment is
                # closest to pnt
                for c in list(tr.keys()):
                    p2, p3 = tr[c]
                    tr_dis.append((get_segment_point_dist(
                        LineSegment(p2, p3), pnt), c))
                tr_dis = sorted(tr_dis)[:t]
                self.triples[i] = [trp for dis, trp in tr_dis]
            else:
                self.triples[i] = list(tr.keys())
        if edgecor:
            # edge correction: for hull points with no triples, build an
            # extrapolated triple from the two best-angled neighbors
            self.extra = {}
            ps = dict([(p, i) for i, p in list(points.keys())])
            chull = convex_hull(list(ps.keys()))
            chull = [p for p in chull if len(self.triples[ps[p]]) == 0]
            for point in chull:
                key = (ps[point], point)
                ng = points[key]
                ng_dist = [(get_points_dist(point, p), p) for p in list(ng.values())]
                ng_dist_s = sorted(ng_dist, reverse=True)
                extra = None
                # try neighbors from nearest to farthest until a valid
                # extrapolation is found
                while extra is None and len(ng_dist_s) > 0:
                    p2 = ng_dist_s.pop()[-1]
                    p3s = list(ng.values())
                    p3s.remove(p2)
                    for p3 in p3s:
                        dist_p2_p3 = get_points_dist(p2, p3)
                        dist_p_p2 = get_points_dist(point, p2)
                        dist_p_p3 = get_points_dist(point, p3)
                        # anchor the rays at whichever of p2/p3 is nearer
                        if dist_p_p2 <= dist_p_p3:
                            ray1, ray2, s_pnt, dist, c = Ray(p2, point), Ray(p2, p3), p2, dist_p_p2, (ps[p2], ps[p3])
                        else:
                            ray1, ray2, s_pnt, dist, c = Ray(p3, point), Ray(p3, p2), p3, dist_p_p3, (ps[p3], ps[p2])
                        ang = get_angle_between(ray1, ray2)
                        if ang >= 90 + angle / 2 or (ang < 0 and ang + 360 >= 90 + angle / 2):
                            ex_point = get_point_at_angle_and_dist(
                                ray1, angle, dist)
                            extra = [c, dist_p2_p3, get_points_dist(
                                s_pnt, ex_point)]
                            break
                # NOTE(review): if no extrapolation is found, ``extra`` stays
                # None and the next line raises TypeError — confirm intended.
                self.triples[ps[point]].append(extra[0])
                self.extra[ps[point]] = extra
class Headbanging_Median_Rate(object):
    """Headbaning Median Rate Smoothing
    Parameters
    ----------
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units
    t : Headbanging_Triples instance
    aw : array (n, 1)
        auxilliary weight variable measured across n spatial units
    iteration : integer
        the number of iterations
    Attributes
    ----------
    r : array (n, 1)
        rate values from headbanging median smoothing
    Examples
    --------
    >>> import libpysal # doctest: +SKIP
    opening the sids2 shapefile
    >>> sids = libpysal.io.open(libpysal.examples.get_path('sids2.shp'), 'r') # doctest: +SKIP
    extracting the centroids of polygons in the sids2 data
    >>> sids_d = np.array([i.centroid for i in sids]) # doctest: +SKIP
    creating a 5-nearest neighbors weights from the centroids
    >>> sids_w = libpysal.weights.KNN(sids_d,k=5) # doctest: +SKIP
    ensuring that the members in sids_w are ordered
    >>> if not sids_w.id_order_set: sids_w.id_order = sids_w.id_order # doctest: +SKIP
    finding headbanging triples by using 5 neighbors
    >>> s_ht = Headbanging_Triples(sids_d,sids_w,k=5) # doctest: +SKIP
    DeprecationWarning: Deprecated
    reading in the sids2 data table
    >>> sids_db = libpysal.io.open(libpysal.examples.get_path('sids2.dbf'), 'r') # doctest: +SKIP
    extracting the 10th and 9th columns in the sids2.dbf and
    using data values as event and population-at-risk variables
    >>> s_e, s_b = np.array(sids_db[:,9]), np.array(sids_db[:,8]) # doctest: +SKIP
    computing headbanging median rates from s_e, s_b, and s_ht
    >>> sids_hb_r = Headbanging_Median_Rate(s_e,s_b,s_ht) # doctest: +SKIP
    extracting the computed rates through the property r of the Headbanging_Median_Rate instance
    >>> sids_hb_r.r[:5] # doctest: +SKIP
    array([ 0.00075586,  0.        ,  0.0008285 ,  0.0018315 ,  0.00498891])
    recomputing headbanging median rates with 5 iterations
    >>> sids_hb_r2 = Headbanging_Median_Rate(s_e,s_b,s_ht,iteration=5) # doctest: +SKIP
    extracting the computed rates through the property r of the Headbanging_Median_Rate instance
    >>> sids_hb_r2.r[:5] # doctest: +SKIP
    array([ 0.0008285 ,  0.00084331,  0.00086896,  0.0018315 ,  0.00498891])
    recomputing headbanging median rates by considring a set of auxilliary weights
    >>> sids_hb_r3 = Headbanging_Median_Rate(s_e,s_b,s_ht,aw=s_b) # doctest: +SKIP
    extracting the computed rates through the property r of the Headbanging_Median_Rate instance
    >>> sids_hb_r3.r[:5] # doctest: +SKIP
    array([ 0.00091659,  0.        ,  0.00156838,  0.0018315 ,  0.00498891])
    """
    def __init__(self, e, b, t, aw=None, iteration=1):
        # NOTE: this class is deprecated; the unconditional raise below
        # makes every statement after it unreachable (dead code kept for
        # reference).
        raise DeprecationWarning('Deprecated')
        self.r = e * 1.0 / b
        self.tr, self.aw = t.triples, aw
        if hasattr(t, 'extra'):
            self.extra = t.extra
        while iteration:
            self.__search_headbanging_median()
            iteration -= 1
    def __get_screens(self, id, triples, weighted=False):
        """Return the screening values used to pick observation id's median.

        Returns the bare rate when there are no triples; otherwise a
        3-tuple (rate, low screen, high screen) or, when weighted, a
        5-tuple that also carries the weights needed for tie-breaking.
        """
        r, tr = self.r, self.tr
        if len(triples) == 0:
            return r[id]
        if hasattr(self, 'extra') and id in self.extra:
            # edge-corrected observation: single extrapolated triple
            extra = self.extra
            trp_r = r[list(triples[0])]
            # observed rate
            # plus difference in rate scaled by ratio of extrapolated distance
            # & observed distance.
            trp_r[-1] = trp_r[0] + (trp_r[0] - trp_r[-1]) * (
                extra[id][-1] * 1.0 / extra[id][1])
            trp_r = sorted(trp_r)
            if not weighted:
                return r[id], trp_r[0], trp_r[-1]
            else:
                trp_aw = self.aw[triples[0]]
                # extrapolate the weight the same way as the rate
                extra_w = trp_aw[0] + (trp_aw[0] - trp_aw[-
                                                          1]) * (extra[id][-1] * 1.0 / extra[id][1])
                return r[id], trp_r[0], trp_r[-1], self.aw[id], trp_aw[0] + extra_w
        if not weighted:
            # screens are the medians of per-triple minima and maxima
            lowest, highest = [], []
            for trp in triples:
                trp_r = np.sort(r[list(trp)])
                lowest.append(trp_r[0])
                highest.append(trp_r[-1])
            return r[id], np.median(np.array(lowest)), np.median(np.array(highest))
        if weighted:
            # as above, but using weighted medians under the auxiliary weights
            lowest, highest = [], []
            lowest_aw, highest_aw = [], []
            for trp in triples:
                trp_r = r[list(trp)]
                dtype = [('r', '%s' % trp_r.dtype), ('w',
                                                     '%s' % self.aw.dtype)]
                trp_r = np.array(list(zip(trp_r, list(trp))), dtype=dtype)
                trp_r.sort(order='r')
                lowest.append(trp_r['r'][0])
                highest.append(trp_r['r'][-1])
                lowest_aw.append(self.aw[int(round(trp_r['w'][0]))])
                highest_aw.append(self.aw[int(round(trp_r['w'][-1]))])
            wm_lowest = weighted_median(np.array(lowest), np.array(lowest_aw))
            wm_highest = weighted_median(
                np.array(highest), np.array(highest_aw))
            triple_members = flatten(triples, unique=False)
            return r[id], wm_lowest, wm_highest, self.aw[id] * len(triples), self.aw[triple_members].sum()
    def __get_median_from_screens(self, screens):
        """Collapse the screening values into a single smoothed rate."""
        if isinstance(screens, float):
            # no triples: the rate passes through unchanged
            return screens
        elif len(screens) == 3:
            return np.median(np.array(screens))
        elif len(screens) == 5:
            # weighted case: clamp the rate into [wm_lowest, wm_highest]
            # only when the triple weights dominate the observation weight
            rk, wm_lowest, wm_highest, w1, w2 = screens
            if rk >= wm_lowest and rk <= wm_highest:
                return rk
            elif rk < wm_lowest and w1 < w2:
                return wm_lowest
            elif rk > wm_highest and w1 < w2:
                return wm_highest
            else:
                return rk
    def __search_headbanging_median(self):
        # one smoothing pass over every observation
        r, tr = self.r, self.tr
        new_r = []
        for k in list(tr.keys()):
            screens = self.__get_screens(
                k, tr[k], weighted=(self.aw is not None))
            new_r.append(self.__get_median_from_screens(screens))
        self.r = np.array(new_r)
    @_requires('pandas')
    @classmethod
    def by_col(cls, df, e, b, t=None, geom_col='geometry', inplace=False, **kwargs):
        """
        Compute smoothing by columns in a dataframe. The bounding box and point
        information is computed from the geometry column.
        Parameters
        -----------
        df : pandas.DataFrame
            a dataframe containing the data to be smoothed
        e : string or list of strings
            the name or names of columns containing event variables to be
            smoothed
        b : string or list of strings
            the name or names of columns containing the population
            variables to be smoothed
        t : Headbanging_Triples instance or list of Headbanging_Triples
            list of headbanging triples instances. If not provided, this
            is computed from the geometry column of the dataframe.
        geom_col: string
            the name of the column in the dataframe containing the
            geometry information.
        inplace : bool
            a flag denoting whether to output a copy of `df` with the
            relevant smoothed columns appended, or to append the columns
            directly to `df` itself.
        **kwargs: optional keyword arguments
            optional keyword options that are passed directly to the
            smoother.
        Returns
        ---------
        a new dataframe containing the smoothed Headbanging Median Rates for the
        event/population pairs. If done inplace, there is no return value and
        `df` is modified in place.
        """
        import pandas as pd
        if not inplace:
            # work on a copy, recurse in inplace mode, and return the copy
            new = df.copy()
            cls.by_col(new, e, b, t=t, geom_col=geom_col, inplace=True, **kwargs)
            return new
        import pandas as pd
        # prep for application over multiple event/population pairs
        if isinstance(e, str):
            e = [e]
        if isinstance(b, str):
            b = [b]
        if len(e) > len(b):
            b = b * len(e)
        data = get_points_array(df[geom_col])
        #Headbanging_Triples doesn't take **kwargs, so filter its arguments
        # (self, data, w, k=5, t=3, angle=135.0, edgecor=False):
        w = kwargs.pop('w', None)
        if w is None:
            found = False
            for k in df._metadata:
                # NOTE(review): ``df.__dict__.get(w, None)`` looks up the
                # variable ``w`` (None here) rather than the metadata key
                # ``k`` — this likely should be ``get(k, None)``; confirm.
                w = df.__dict__.get(w, None)
                if isinstance(w, W):
                    found = True
            if not found:
                raise Exception('Weights not provided and no weights attached to frame!'
                                ' Please provide a weight or attach a weight to the'
                                ' dataframe')
        k = kwargs.pop('k', 5)
        t = kwargs.pop('t', 3)
        angle = kwargs.pop('angle', 135.0)
        edgecor = kwargs.pop('edgecor', False)
        # NOTE(review): Headbanging_Triples.__init__ unconditionally raises
        # DeprecationWarning, so this call cannot currently succeed.
        hbt = Headbanging_Triples(data, w, k=k, t=t, angle=angle,
                                  edgecor=edgecor)
        res = []
        for ename, bname in zip(e, b):
            r = cls(df[ename], df[bname], hbt, **kwargs).r
            name = '_'.join(('-'.join((ename, bname)), cls.__name__.lower()))
            df[name] = r
|
pysal/esda | esda/smoothing.py | weighted_median | python | def weighted_median(d, w):
dtype = [('w', '%s' % w.dtype), ('v', '%s' % d.dtype)]
d_w = np.array(list(zip(w, d)), dtype=dtype)
d_w.sort(order='v')
reordered_w = d_w['w'].cumsum()
cumsum_threshold = reordered_w[-1] * 1.0 / 2
median_inx = (reordered_w >= cumsum_threshold).nonzero()[0][0]
if reordered_w[median_inx] == cumsum_threshold and len(d) - 1 > median_inx:
return np.sort(d)[median_inx:median_inx + 2].mean()
return np.sort(d)[median_inx] | A utility function to find a median of d based on w
Parameters
----------
d : array
(n, 1), variable for which median will be found
w : array
(n, 1), variable on which d's median will be decided
Notes
-----
d and w are arranged in the same order
Returns
-------
float
median of d
Examples
--------
Creating an array including five integers.
We will get the median of these integers.
>>> d = np.array([5,4,3,1,2])
Creating another array including weight values for the above integers.
The median of d will be decided with a consideration to these weight
values.
>>> w = np.array([10, 22, 9, 2, 5])
Applying weighted_median function
>>> weighted_median(d, w)
4 | train | https://github.com/pysal/esda/blob/2fafc6ec505e153152a86601d3e0fba080610c20/esda/smoothing.py#L66-L113 | null | from __future__ import division
"""
Apply smoothing to rate computation
[Longer Description]
Author(s):
Myunghwa Hwang mhwang4@gmail.com
David Folch dfolch@asu.edu
Luc Anselin luc.anselin@asu.edu
Serge Rey srey@asu.edu
"""
__author__ = "Myunghwa Hwang <mhwang4@gmail.com>, David Folch <dfolch@asu.edu>, Luc Anselin <luc.anselin@asu.edu>, Serge Rey <srey@asu.edu"
from libpysal.weights.weights import W
from libpysal.weights.distance import Kernel
from libpysal.weights.util import get_points_array, comb
from libpysal.cg import Point, Ray, LineSegment
from libpysal.cg import get_angle_between, get_points_dist, get_segment_point_dist,\
get_point_at_angle_and_dist, convex_hull, get_bounding_box
from libpysal.common import np, KDTree, requires as _requires
from libpysal.weights.spatial_lag import lag_spatial as slag
from scipy.stats import gamma, norm, chi2, poisson
from functools import reduce
import doctest
__all__ = ['Excess_Risk', 'Empirical_Bayes', 'Spatial_Empirical_Bayes', 'Spatial_Rate', 'Kernel_Smoother', 'Age_Adjusted_Smoother', 'Disk_Smoother', 'Spatial_Median_Rate', 'Spatial_Filtering', 'Headbanging_Triples', 'Headbanging_Median_Rate', 'flatten', 'weighted_median', 'sum_by_n', 'crude_age_standardization', 'direct_age_standardization', 'indirect_age_standardization', 'standardized_mortality_ratio', 'choynowski', 'assuncao_rate']
def flatten(l, unique=True):
    """flatten a list of lists

    Parameters
    ----------
    l : list
        of lists
    unique : boolean
        whether or not only unique items are wanted (default=True)

    Returns
    -------
    list
        of single items

    Examples
    --------
    >>> l = [[1, 2], [3, 4, ], [5, 6]]
    >>> flatten(l)
    [1, 2, 3, 4, 5, 6]
    """
    # Single O(n) pass; the previous reduce(lambda x, y: x + y, l) was
    # quadratic and raised TypeError on an empty input list.
    flat = [item for sub in l for item in sub]
    if not unique:
        return flat
    # NOTE: set() does not preserve input order (same as the original code)
    return list(set(flat))
def sum_by_n(d, w, n):
    """Summarize the weighted values ``d * w`` into ``n`` group sums.

    Parameters
    ----------
    d : array
        (t, 1), numerical values
    w : array
        (t, 1), numerical values for weighting
    n : integer
        the number of groups; t = c*n (c is a constant)

    Returns
    -------
    array
        (n, 1), the sum of each consecutive group

    Examples
    --------
    >>> d = np.array([10, 9, 20, 30])
    >>> w = np.array([0.5, 0.1, 0.3, 0.8])
    >>> sum_by_n(d, w, 2)
    array([ 5.9, 30. ])
    """
    weighted = d * w
    size = len(d) // n  # group length; must be floor division
    chunks = [weighted[start:start + size]
              for start in range(0, len(d), size)]
    return np.array([sum(chunk) for chunk in chunks])
def crude_age_standardization(e, b, n):
    """Compute rates through crude age standardization.

    Parameters
    ----------
    e : array
        (n*h, 1), event variable measured for each age group across n
        spatial units
    b : array
        (n*h, 1), population at risk variable measured for each age group
        across n spatial units
    n : integer
        the number of spatial units

    Notes
    -----
    e and b are arranged in the same order

    Returns
    -------
    array
        (n, 1), age standardized rate

    Examples
    --------
    >>> e = np.array([30, 25, 25, 15, 33, 21, 30, 20])
    >>> b = np.array([100, 100, 110, 90, 100, 90, 110, 90])
    >>> crude_age_standardization(e, b, 2)
    array([0.2375    , 0.26666667])
    """
    groups = len(e) // n
    rates = e * 1.0 / b
    # total population of each spatial unit, repeated per age group
    unit_totals = sum_by_n(b, 1.0, n).repeat(groups)
    # each age group weighted by its share of the unit's population
    weights = b * 1.0 / unit_totals
    return sum_by_n(rates, weights, n)
def direct_age_standardization(e, b, s, n, alpha=0.05):
    """A utility function to compute rate through direct age standardization

    Parameters
    ----------
    e : array
        (n*h, 1), event variable measured for each age group across n
        spatial units
    b : array
        (n*h, 1), population at risk variable measured for each age group
        across n spatial units
    s : array
        (n*h, 1), standard population for each age group across n spatial
        units
    n : integer
        the number of spatial units
    alpha : float
        significance level for confidence interval

    Notes
    -----
    e, b, and s are arranged in the same order

    Returns
    -------
    list
        a list of n tuples; a tuple has a rate and its lower and upper
        limits (gamma-based confidence intervals)

    Examples
    --------
    >>> e = np.array([30, 25, 25, 15, 33, 21, 30, 20])
    >>> b = np.array([1000, 1000, 1100, 900, 1000, 900, 1100, 900])
    >>> s = np.array([1000, 900, 1000, 900, 1000, 900, 1000, 900])
    >>> [round(i[0], 4) for i in direct_age_standardization(e, b, s, 2)]
    [0.0237, 0.0267]
    """
    age_weight = (1.0 / b) * (s * 1.0 / sum_by_n(s, 1.0, n).repeat(len(s) // n))
    adjusted_r = sum_by_n(e, age_weight, n)
    var_estimate = sum_by_n(e, np.square(age_weight), n)
    # gamma parameters for the lower bound
    g_a = np.square(adjusted_r) / var_estimate
    g_b = var_estimate / adjusted_r
    # per-unit maximum age weight, used to widen the upper-bound gamma
    k = [age_weight[i:i + len(b) // n].max() for i in range(0, len(b),
                                                            len(b) // n)]
    g_a_k = np.square(adjusted_r + k) / (var_estimate + np.square(k))
    g_b_k = (var_estimate + np.square(k)) / (adjusted_r + k)
    res = []
    for i in range(len(adjusted_r)):
        if adjusted_r[i] == 0:
            # Exact Poisson-style upper bound for a zero count:
            # half the chi-square quantile with 2 degrees of freedom.
            # The original call omitted the mandatory df argument and
            # raised a TypeError whenever a zero rate was encountered.
            upper = 0.5 * chi2.ppf(1 - 0.5 * alpha, 2)
            lower = 0.0
        else:
            lower = gamma.ppf(0.5 * alpha, g_a[i], scale=g_b[i])
            upper = gamma.ppf(1 - 0.5 * alpha, g_a_k[i], scale=g_b_k[i])
        res.append((adjusted_r[i], lower, upper))
    return res
def indirect_age_standardization(e, b, s_e, s_b, n, alpha=0.05):
    """Compute rates through indirect age standardization.

    Parameters
    ----------
    e : array
        (n*h, 1), event variable measured for each age group across n
        spatial units
    b : array
        (n*h, 1), population at risk variable measured for each age group
        across n spatial units
    s_e : array
        (n*h, 1), event variable for each age group in a standard population
    s_b : array
        (n*h, 1), population variable for each age group in a standard
        population
    n : integer
        the number of spatial units
    alpha : float
        significance level for confidence interval

    Notes
    -----
    e, b, s_e, and s_b are arranged in the same order

    Returns
    -------
    list
        a list of n tuples; a tuple has a rate and its lower and upper
        limits

    Examples
    --------
    >>> e = np.array([30, 25, 25, 15, 33, 21, 30, 20])
    >>> b = np.array([100, 100, 110, 90, 100, 90, 110, 90])
    >>> s_e = np.array([100, 45, 120, 100, 50, 30, 200, 80])
    >>> s_b = np.array([1000, 900, 1000, 900, 1000, 900, 1000, 900])
    >>> [i[0] for i in indirect_age_standardization(e, b, s_e, s_b, 2)]
    [0.23723821989528798, 0.2610803324099723]
    """
    smr = standardized_mortality_ratio(e, b, s_e, s_b, n)
    # overall rate of the standard population
    standard_rate = sum(s_e * 1.0) / sum(s_b * 1.0)
    adjusted = standard_rate * smr
    events_per_unit = sum_by_n(e, 1.0, n)
    # normal-approximation confidence interval on the log-SMR scale
    se_log = 1.0 / np.sqrt(events_per_unit)
    z = norm.ppf(1 - 0.5 * alpha)
    lower = np.exp(np.log(smr) - z * se_log) * standard_rate
    upper = np.exp(np.log(smr) + z * se_log) * standard_rate
    return list(zip(adjusted, lower, upper))
def standardized_mortality_ratio(e, b, s_e, s_b, n):
    """Compute the standardized mortality ratio (SMR).

    Parameters
    ----------
    e : array
        (n*h, 1), event variable measured for each age group across n
        spatial units
    b : array
        (n*h, 1), population at risk variable measured for each age group
        across n spatial units
    s_e : array
        (n*h, 1), event variable for each age group in a standard population
    s_b : array
        (n*h, 1), population variable for each age group in a standard
        population
    n : integer
        the number of spatial units

    Notes
    -----
    e, b, s_e, and s_b are arranged in the same order

    Returns
    -------
    array
        (n, 1), observed over expected event counts

    Examples
    --------
    >>> e = np.array([30, 25, 25, 15, 33, 21, 30, 20])
    >>> b = np.array([100, 100, 110, 90, 100, 90, 110, 90])
    >>> s_e = np.array([100, 45, 120, 100, 50, 30, 200, 80])
    >>> s_b = np.array([1000, 900, 1000, 900, 1000, 900, 1000, 900])
    >>> [round(v, 4) for v in standardized_mortality_ratio(e, b, s_e, s_b, 2)]
    [2.4869, 2.7368]
    """
    standard_rates = s_e * 1.0 / s_b
    observed = sum_by_n(e, 1.0, n)
    # expected counts: standard age-specific rates applied to local populations
    expected = sum_by_n(b, standard_rates, n)
    return observed * 1.0 / expected
def choynowski(e, b, n, threshold=None):
    """Choynowski map probabilities [Choynowski1959]_ .

    Parameters
    ----------
    e : array(n*h, 1)
        event variable measured for each age group across n spatial units
    b : array(n*h, 1)
        population at risk variable measured for each age group across n
        spatial units
    n : integer
        the number of spatial units
    threshold : float
        Returns zero for any p-value greater than threshold

    Notes
    -----
    e and b are arranged in the same order

    Returns
    -------
    array
        (n, 1), Poisson tail probabilities per spatial unit

    Examples
    --------
    >>> e = np.array([30, 25, 25, 15, 33, 21, 30, 20])
    >>> b = np.array([100, 100, 110, 90, 100, 90, 110, 90])
    >>> [round(v, 3) for v in choynowski(e, b, 2)]
    [0.304, 0.294]
    """
    observed = sum_by_n(e, 1.0, n)
    at_risk = sum_by_n(b, 1.0, n)
    overall_rate = sum(observed) * 1.0 / sum(at_risk)
    expected = overall_rate * at_risk
    probs = []
    for obs, exp in zip(observed, expected):
        if obs <= exp:
            # lower-tail probability for counts at or below expectation
            probs.append(poisson.cdf(obs, exp))
        else:
            # upper-tail probability for counts above expectation
            probs.append(1 - poisson.cdf(obs - 1, exp))
    if threshold:
        probs = [p if p < threshold else 0.0 for p in probs]
    return np.array(probs)
def assuncao_rate(e, b):
    """Standardize rates using the mean and standard deviation of
    Empirical Bayes rate estimates; used for Moran's I corrected for
    rate variables [Choynowski1959]_ .

    Parameters
    ----------
    e : array(n, 1)
        event variable measured at n spatial units
    b : array(n, 1)
        population at risk variable measured at n spatial units

    Notes
    -----
    e and b are arranged in the same order

    Returns
    -------
    array
        (n, 1), standardized rates

    Examples
    --------
    >>> e = np.array([30, 25, 25, 15, 33, 21, 30, 20])
    >>> b = np.array([100, 100, 110, 90, 100, 90, 110, 90])
    >>> assuncao_rate(e, b)[:4]
    array([ 1.03843594, -0.04099089, -0.56250375, -1.73061861])
    """
    rates = e * 1.0 / b
    total_b = sum(b)
    # EB prior mean: overall rate across all units
    mean_rate = sum(e) * 1.0 / total_b
    # population-weighted variance of the raw rates around the prior mean
    weighted_var = sum(b * ((rates - mean_rate) ** 2)) / total_b
    alpha_hat = weighted_var - mean_rate / (float(total_b) / len(e))
    # per-unit variance of the EB estimate
    eb_var = alpha_hat + mean_rate / b
    return (rates - mean_rate) / np.sqrt(eb_var)
class _Smoother(object):
    """
    This is a helper class that implements things that all smoothers should do.
    Right now, the only thing that we need to propagate is the by_col function.

    TBQH, most of these smoothers should be functions, not classes (aside from
    maybe headbanging triples), since they're literally only inits + one
    attribute.
    """
    def __init__(self):
        pass

    @classmethod
    def by_col(cls, df, e, b, inplace=False, **kwargs):
        """
        Compute smoothing by columns in a dataframe.

        Parameters
        -----------
        df : pandas.DataFrame
            a dataframe containing the data to be smoothed
        e : string or list of strings
            the name or names of columns containing event variables to be
            smoothed
        b : string or list of strings
            the name or names of columns containing the population
            variables to be smoothed
        inplace : bool
            a flag denoting whether to output a copy of `df` with the
            relevant smoothed columns appended, or to append the columns
            directly to `df` itself.
        **kwargs: optional keyword arguments
            optional keyword options that are passed directly to the
            smoother.

        Returns
        ---------
        a copy of `df` containing the columns. Or, if `inplace`, this returns
        None, but implicitly adds columns to `df`.
        """
        if not inplace:
            new = df.copy()
            cls.by_col(new, e, b, inplace=True, **kwargs)
            return new
        if isinstance(e, str):
            e = [e]
        if isinstance(b, str):
            b = [b]
        if len(b) == 1 and len(e) > 1:
            b = b * len(e)
        # explicit check: the original used `assert`, which is silently
        # stripped under `python -O`, losing the validation entirely
        if len(e) != len(b):
            raise ValueError('There is no one-to-one mapping between event'
                             ' variable and population at risk variable!')
        for ename, bname in zip(e, b):
            outcol = '_'.join(('-'.join((ename, bname)), cls.__name__.lower()))
            df[outcol] = cls(df[ename], df[bname], **kwargs).r
class Excess_Risk(_Smoother):
    """Excess Risk

    Parameters
    ----------
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units

    Attributes
    ----------
    r : array (n, 1)
        excess risk values (observed rate relative to the overall rate)

    Examples
    --------
    >>> import libpysal
    >>> stl = libpysal.io.open(libpysal.examples.get_path('stl_hom.csv'), 'r')
    >>> stl_e, stl_b = np.array(stl[:,10]), np.array(stl[:,13])
    >>> er = Excess_Risk(stl_e, stl_b)
    >>> er.r[:3]
    array([[0.20665681],
           [0.43613787],
           [0.42078261]])
    """
    def __init__(self, e, b):
        events = np.asarray(e).reshape(-1, 1)
        pop = np.asarray(b).reshape(-1, 1)
        # overall rate across all units
        overall = events.sum() * 1.0 / pop.sum()
        # ratio of each unit's rate to the overall rate
        self.r = events * 1.0 / (pop * overall)
class Empirical_Bayes(_Smoother):
    """Aspatial Empirical Bayes Smoothing

    Parameters
    ----------
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units

    Attributes
    ----------
    r : array (n, 1)
        rate values from Empirical Bayes Smoothing

    Examples
    --------
    >>> import libpysal
    >>> stl = libpysal.io.open(libpysal.examples.get_path('stl_hom.csv'), 'r')
    >>> stl_e, stl_b = np.array(stl[:,10]), np.array(stl[:,13])
    >>> eb = Empirical_Bayes(stl_e, stl_b)
    >>> eb.r[:3]
    array([[2.36718950e-05],
           [4.54539167e-05],
           [4.78114019e-05]])
    """
    def __init__(self, e, b):
        events = np.asarray(e).reshape(-1, 1)
        pop = np.asarray(b).reshape(-1, 1)
        e_total = events.sum() * 1.0
        b_total = pop.sum() * 1.0
        # prior mean: overall rate across all units
        prior_mean = e_total / b_total
        rates = events * 1.0 / pop
        deviations = rates - prior_mean
        # method-of-moments estimate of the prior variance
        prior_var = ((pop * deviations * deviations).sum() * 1.0 / b_total
                     - prior_mean * 1.0 / pop.mean())
        # shrinkage factor: trust the raw rate more where population is large
        shrinkage = prior_var / (prior_var + prior_mean / pop)
        self.r = shrinkage * rates + (1.0 - shrinkage) * prior_mean
class _Spatial_Smoother(_Smoother):
    """
    This is a helper class that implements things that all the things that
    spatial smoothers should do.

    Right now, the only thing that we need to propagate is the by_col function.

    TBQH, most of these smoothers should be functions, not classes (aside from
    maybe headbanging triples), since they're literally only inits + one
    attribute.
    """
    def __init__(self):
        pass

    @classmethod
    def by_col(cls, df, e, b, w=None, inplace=False, **kwargs):
        """
        Compute smoothing by columns in a dataframe.

        Parameters
        -----------
        df : pandas.DataFrame
            a dataframe containing the data to be smoothed
        e : string or list of strings
            the name or names of columns containing event variables to be
            smoothed
        b : string or list of strings
            the name or names of columns containing the population
            variables to be smoothed
        w : pysal.weights.W or list of pysal.weights.W
            the spatial weights object or objects to use with the
            event-population pairs. If not provided and a weights object
            is in the dataframe's metadata, that weights object will be
            used.
        inplace : bool
            a flag denoting whether to output a copy of `df` with the
            relevant smoothed columns appended, or to append the columns
            directly to `df` itself.
        **kwargs: optional keyword arguments
            optional keyword options that are passed directly to the
            smoother.

        Returns
        ---------
        a copy of `df` containing the columns. Or, if `inplace`, this returns
        None, but implicitly adds columns to `df`.
        """
        if not inplace:
            new = df.copy()
            cls.by_col(new, e, b, w=w, inplace=True, **kwargs)
            return new
        if isinstance(e, str):
            e = [e]
        if isinstance(b, str):
            b = [b]
        if w is None:
            # look for a weights object attached to the dataframe; the
            # original did `df.__dict__.get(w, None)` (key None), so it
            # could never find one
            for key in df._metadata:
                cand = df.__dict__.get(key, None)
                if isinstance(cand, W):
                    w = cand
                    break
            if w is None:
                raise Exception('Weights not provided and no weights attached to frame!'
                                ' Please provide a weight or attach a weight to the'
                                ' dataframe')
        if isinstance(w, W):
            w = [w] * len(e)
        if len(b) == 1 and len(e) > 1:
            b = b * len(e)
        # explicit check: `assert` is stripped under `python -O`
        if len(e) != len(b):
            raise ValueError('There is no one-to-one mapping between event'
                             ' variable and population at risk variable!')
        for ename, bname, wi in zip(e, b, w):
            outcol = '_'.join(('-'.join((ename, bname)), cls.__name__.lower()))
            df[outcol] = cls(df[ename], df[bname], w=wi, **kwargs).r
class Spatial_Empirical_Bayes(_Spatial_Smoother):
    """Spatial Empirical Bayes Smoothing

    Parameters
    ----------
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units
    w : spatial weights instance

    Attributes
    ----------
    r : array (n, 1)
        rate values from Empirical Bayes Smoothing

    Examples
    --------
    Reading data in stl_hom.csv into stl to extract values
    for event and population-at-risk variables

    >>> import libpysal
    >>> stl = libpysal.io.open(libpysal.examples.get_path('stl_hom.csv'), 'r')

    The 11th and 14th columns in stl_hom.csv includes the number of homocides and population.
    Creating two arrays from these columns.

    >>> stl_e, stl_b = np.array(stl[:,10]), np.array(stl[:,13])

    Creating a spatial weights instance by reading in stl.gal file.

    >>> stl_w = libpysal.io.open(libpysal.examples.get_path('stl.gal'), 'r').read()

    Ensuring that the elements in the spatial weights instance are ordered
    by the given sequential numbers from 1 to the number of observations in stl_hom.csv

    >>> if not stl_w.id_order_set: stl_w.id_order = range(1,len(stl) + 1)

    Creating an instance of Spatial_Empirical_Bayes class using stl_e, stl_b, and stl_w

    >>> from esda.smoothing import Spatial_Empirical_Bayes
    >>> s_eb = Spatial_Empirical_Bayes(stl_e, stl_b, stl_w)

    Extracting the risk values through the property r of s_eb

    >>> s_eb.r[:10]
    array([[4.01485749e-05],
           [3.62437513e-05],
           [4.93034844e-05],
           [5.09387329e-05],
           [3.72735210e-05],
           [3.69333797e-05],
           [5.40245456e-05],
           [2.99806055e-05],
           [3.73034109e-05],
           [3.47270722e-05]])
    """
    def __init__(self, e, b, w):
        if not w.id_order_set:
            raise ValueError("w id_order must be set to align with the order of e an b")
        e = np.asarray(e).reshape(-1,1)
        b = np.asarray(b).reshape(-1,1)
        # local prior: spatially smoothed rate around each unit
        r_mean = Spatial_Rate(e, b, w).r
        rate = e * 1.0 / b
        r_var_left = np.ones_like(e) * 1.
        ngh_num = np.ones_like(e)
        # population of each unit plus that of its neighbors
        bi = slag(w, b) + b
        for i, idv in enumerate(w.id_order):
            # the neighborhood includes the unit itself
            ngh = list(w[idv].keys()) + [idv]
            nghi = [w.id2i[k] for k in ngh]
            ngh_num[i] = len(nghi)
            # population-weighted squared deviation from the local prior
            v = sum(np.square(rate[nghi] - r_mean[i]) * b[nghi])
            r_var_left[i] = v
        r_var_left = r_var_left / bi
        r_var_right = r_mean / (bi / ngh_num)
        r_var = r_var_left - r_var_right
        # clamp negative variance estimates to zero
        r_var[r_var < 0] = 0.0
        # shrink each raw rate toward its local prior rate
        self.r = r_mean + (rate - r_mean) * (r_var / (r_var + (r_mean / b)))
class Spatial_Rate(_Spatial_Smoother):
    """Spatial Rate Smoothing

    Parameters
    ----------
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units
    w : spatial weights instance

    Attributes
    ----------
    r : array (n, 1)
        rate values from spatial rate smoothing

    Examples
    --------
    >>> import libpysal
    >>> stl = libpysal.io.open(libpysal.examples.get_path('stl_hom.csv'), 'r')
    >>> stl_e, stl_b = np.array(stl[:,10]), np.array(stl[:,13])
    >>> stl_w = libpysal.io.open(libpysal.examples.get_path('stl.gal'), 'r').read()
    >>> if not stl_w.id_order_set: stl_w.id_order = range(1,len(stl) + 1)
    >>> from esda.smoothing import Spatial_Rate
    >>> sr = Spatial_Rate(stl_e,stl_b,stl_w)
    >>> sr.r[:3]
    array([[4.59326407e-05],
           [3.62437513e-05],
           [4.98677081e-05]])
    """
    def __init__(self, e, b, w):
        if not w.id_order_set:
            raise ValueError("w id_order must be set to align with the order of e and b")
        else:
            events = np.asarray(e).reshape(-1, 1)
            pop = np.asarray(b).reshape(-1, 1)
            # binary weights so the lag is a plain neighborhood sum
            w.transform = 'b'
            lagged_events = slag(w, events)
            lagged_pop = slag(w, pop)
            # neighborhood rate: (own + neighbor events) / (own + neighbor population)
            self.r = (events + lagged_events) / (pop + lagged_pop)
            # restore the original transformation
            w.transform = 'o'
class Kernel_Smoother(_Spatial_Smoother):
    """Kernal smoothing

    Parameters
    ----------
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units
    w : Kernel weights instance

    Attributes
    ----------
    r : array (n, 1)
        rate values from spatial rate smoothing

    Examples
    --------
    >>> e = np.array([10, 1, 3, 4, 2, 5])
    >>> b = np.array([100, 15, 20, 20, 80, 90])
    >>> points=[(10, 10), (20, 10), (40, 10), (15, 20), (30, 20), (30, 30)]
    >>> kw=Kernel(points)
    >>> if not kw.id_order_set: kw.id_order = range(0,len(points))
    >>> kr = Kernel_Smoother(e, b, kw)
    >>> kr.r
    array([[0.10543301],
           [0.0858573 ],
           [0.08256196],
           [0.09884584],
           [0.04756872],
           [0.04845298]])

    Raises
    ------
    ValueError
        if ``w`` is not a Kernel weights instance or its id_order is unset
    """
    def __init__(self, e, b, w):
        # The original `raise Error(...)` referenced an undefined name and
        # produced a NameError; raise a meaningful exception instead.
        # isinstance also accepts Kernel subclasses, unlike `type(w) != Kernel`.
        if not isinstance(w, Kernel):
            raise ValueError('w must be an instance of Kernel weights')
        if not w.id_order_set:
            raise ValueError("w id_order must be set to align with the order of e and b")
        else:
            e = np.asarray(e).reshape(-1, 1)
            b = np.asarray(b).reshape(-1, 1)
            # kernel-weighted neighborhood sums of events and population
            w_e, w_b = slag(w, e), slag(w, b)
            self.r = w_e / w_b
class Age_Adjusted_Smoother(_Spatial_Smoother):
    """Age-adjusted rate smoothing

    Parameters
    ----------
    e : array (n*h, 1)
        event variable measured for each age group across n spatial units
    b : array (n*h, 1)
        population at risk variable measured for each age group across n
        spatial units
    w : spatial weights instance
    s : array (n*h, 1)
        standard population for each age group across n spatial units

    Attributes
    ----------
    r : array (n, 1)
        rate values from spatial rate smoothing

    Notes
    -----
    Weights used to smooth age-specific events and populations are
    simple binary weights

    Examples
    --------
    >>> e = np.array([10, 8, 1, 4, 3, 5, 4, 3, 2, 1, 5, 3])
    >>> b = np.array([100, 90, 15, 30, 25, 20, 30, 20, 80, 80, 90, 60])
    >>> s = np.array([98, 88, 15, 29, 20, 23, 33, 25, 76, 80, 89, 66])
    >>> points=[(10, 10), (20, 10), (40, 10), (15, 20), (30, 20), (30, 30)]
    >>> kw=Kernel(points)
    >>> if not kw.id_order_set: kw.id_order = range(0,len(points))
    >>> ar = Age_Adjusted_Smoother(e, b, kw, s)
    >>> ar.r
    array([0.10519625, 0.08494318, 0.06440072, 0.06898604, 0.06952076,
           0.05020968])
    """
    def __init__(self, e, b, w, s, alpha=0.05):
        e = np.asarray(e).reshape(-1, 1)
        b = np.asarray(b).reshape(-1, 1)
        s = np.asarray(s).flatten()
        t = len(e)
        h = t // w.n  # number of age groups per spatial unit
        w.transform = 'b'
        e_n, b_n = [], []
        # spatially lag each age group separately
        for i in range(h):
            e_n.append(slag(w, e[i::h]).tolist())
            b_n.append(slag(w, b[i::h]).tolist())
        # interleave the lagged groups back into (unit, age-group) order
        e_n = np.array(e_n).reshape((1, t), order='F')[0]
        b_n = np.array(b_n).reshape((1, t), order='F')[0]
        e_n = e_n.reshape(s.shape)
        b_n = b_n.reshape(s.shape)
        r = direct_age_standardization(e_n, b_n, s, w.n, alpha=alpha)
        self.r = np.array([i[0] for i in r])
        # restore the original transformation
        w.transform = 'o'

    @_requires('pandas')
    @classmethod
    def by_col(cls, df, e, b, w=None, s=None, **kwargs):
        """
        Compute smoothing by columns in a dataframe.

        Parameters
        -----------
        df : pandas.DataFrame
            a dataframe containing the data to be smoothed
        e : string or list of strings
            the name or names of columns containing event variables to be
            smoothed
        b : string or list of strings
            the name or names of columns containing the population
            variables to be smoothed
        w : pysal.weights.W or list of pysal.weights.W
            the spatial weights object or objects to use with the
            event-population pairs. If not provided and a weights object
            is in the dataframe's metadata, that weights object will be
            used.
        s : string or list of strings
            the name or names of columns to use as a standard population
            variable for the events `e` and at-risk populations `b`.
        **kwargs: optional keyword arguments
            optional keyword options that are passed directly to the
            smoother.

        Returns
        ---------
        a new dataframe containing the smoothed columns.
        """
        if s is None:
            raise Exception('Standard population variable "s" must be supplied.')
        import pandas as pd
        if isinstance(e, str):
            e = [e]
        if isinstance(b, str):
            b = [b]
        if isinstance(s, str):
            s = [s]
        if w is None:
            found = False
            # the original looked up `df.__dict__.get(w, None)` (key None),
            # so an attached weights object was never found
            for k in df._metadata:
                w = df.__dict__.get(k, None)
                if isinstance(w, W):
                    found = True
                    break
            if not found:
                raise Exception('Weights not provided and no weights attached to frame!'
                                ' Please provide a weight or attach a weight to the'
                                ' dataframe.')
        if isinstance(w, W):
            w = [w] * len(e)
        if not all(isinstance(wi, W) for wi in w):
            raise Exception('Weights object must be an instance of '
                            ' libpysal.weights.W!')
        b = b * len(e) if len(b) == 1 and len(e) > 1 else b
        s = s * len(e) if len(s) == 1 and len(e) > 1 else s
        # explicit check: `assert` is stripped under `python -O`
        if not (len(e) == len(b) == len(s) == len(w)):
            raise ValueError('There is no one-to-one mapping between event'
                             ' variable and population at risk variable, and '
                             ' standard population variable, and spatial '
                             ' weights!')
        rdf = []
        max_len = 0
        for ei, bi, wi, si in zip(e, b, w, s):
            outcol = '_'.join(('-'.join((ei, bi)), cls.__name__.lower()))
            this_r = cls(df[ei], df[bi], w=wi, s=df[si], **kwargs).r
            # track the longest column; the original condition was inverted
            # (`max_len = 0 if len(this_r) > max_len else max_len`), so
            # max_len always stayed 0 and no padding ever happened
            max_len = max(max_len, len(this_r))
            rdf.append((outcol, this_r.tolist()))
        # pad every column to the same length. DataFrame.from_items was
        # removed from pandas, so build from an (order-preserving) dict.
        padded = ((name, vals + [None] * (max_len - len(vals)))
                  for name, vals in rdf)
        return pd.DataFrame.from_dict(dict(padded))
class Disk_Smoother(_Spatial_Smoother):
    """Locally weighted averages (disk smoothing).

    Each unit's raw rate is replaced by the average of the rates observed
    over the unit's neighborhood, as defined by ``w``.

    Parameters
    ----------
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units
    w : spatial weights matrix

    Attributes
    ----------
    r : array (n, 1)
        rate values from disk smoothing

    Examples
    --------
    Reading data in stl_hom.csv into stl to extract values
    for event and population-at-risk variables

    >>> import libpysal
    >>> stl = libpysal.io.open(libpysal.examples.get_path('stl_hom.csv'), 'r')

    The 11th and 14th columns in stl_hom.csv includes the number of homocides and population.
    Creating two arrays from these columns.

    >>> stl_e, stl_b = np.array(stl[:,10]), np.array(stl[:,13])

    Creating a spatial weights instance by reading in stl.gal file.

    >>> stl_w = libpysal.io.open(libpysal.examples.get_path('stl.gal'), 'r').read()

    Ensuring that the elements in the spatial weights instance are ordered
    by the given sequential numbers from 1 to the number of observations in stl_hom.csv

    >>> if not stl_w.id_order_set: stl_w.id_order = range(1,len(stl) + 1)

    Applying disk smoothing to stl_e and stl_b

    >>> sr = Disk_Smoother(stl_e,stl_b,stl_w)

    Extracting the risk values through the property r of s_eb

    >>> sr.r[:10]
    array([[4.56502262e-05],
           [3.44027685e-05],
           [3.38280487e-05],
           [4.78530468e-05],
           [3.12278573e-05],
           [2.22596997e-05],
           [2.67074856e-05],
           [2.36924573e-05],
           [3.48801587e-05],
           [3.09511832e-05]])
    """

    def __init__(self, e, b, w):
        # The smoother relies on positional alignment between the data
        # arrays and the weights object, so the id order must be fixed.
        if not w.id_order_set:
            raise ValueError("w id_order must be set to align with the order of e and b")
        events = np.asarray(e).reshape(-1, 1)
        at_risk = np.asarray(b).reshape(-1, 1)
        rates = events * 1.0 / at_risk
        # Per-unit sum of neighbor weights, in id order, used to turn the
        # spatial lag (a weighted sum) into a weighted average.
        totals = np.array([sum(w.weights[idx]) for idx in w.id_order])
        self.r = slag(w, rates) / totals.reshape(-1, 1)
class Spatial_Median_Rate(_Spatial_Smoother):
    """Spatial Median Rate Smoothing.

    Replaces each unit's rate with the (optionally weighted) median of the
    rates over the unit and its neighbors, repeated ``iteration`` times.

    Parameters
    ----------
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units
    w : spatial weights instance
    aw : array (n, 1)
        auxiliary weight variable measured across n spatial units
    iteration : integer
        the number of iterations

    Attributes
    ----------
    r : array (n, 1)
        rate values from spatial median rate smoothing
    w : spatial weights instance
    aw : array (n, 1)
        auxiliary weight variable measured across n spatial units

    Examples
    --------
    Reading data in stl_hom.csv into stl to extract values
    for event and population-at-risk variables

    >>> import libpysal
    >>> stl = libpysal.io.open(libpysal.examples.get_path('stl_hom.csv'), 'r')

    The 11th and 14th columns in stl_hom.csv includes the number of homocides and population.
    Creating two arrays from these columns.

    >>> stl_e, stl_b = np.array(stl[:,10]), np.array(stl[:,13])

    Creating a spatial weights instance by reading in stl.gal file.

    >>> stl_w = libpysal.io.open(libpysal.examples.get_path('stl.gal'), 'r').read()

    Ensuring that the elements in the spatial weights instance are ordered
    by the given sequential numbers from 1 to the number of observations in stl_hom.csv

    >>> if not stl_w.id_order_set: stl_w.id_order = range(1,len(stl) + 1)

    Computing spatial median rates without iteration

    >>> smr0 = Spatial_Median_Rate(stl_e,stl_b,stl_w)

    Extracting the computed rates through the property r of the Spatial_Median_Rate instance

    >>> smr0.r[:10]
    array([3.96047383e-05, 3.55386859e-05, 3.28308921e-05, 4.30731238e-05,
           3.12453969e-05, 1.97300409e-05, 3.10159267e-05, 2.19279204e-05,
           2.93763432e-05, 2.93763432e-05])

    Recomputing spatial median rates with 5 iterations

    >>> smr1 = Spatial_Median_Rate(stl_e,stl_b,stl_w,iteration=5)

    Extracting the computed rates through the property r of the Spatial_Median_Rate instance

    >>> smr1.r[:10]
    array([3.11293620e-05, 2.95956330e-05, 3.11293620e-05, 3.10159267e-05,
           2.98436066e-05, 2.76406686e-05, 3.10159267e-05, 2.94788171e-05,
           2.99460806e-05, 2.96981070e-05])

    Computing spatial median rates by using the base variable as auxilliary weights
    without iteration

    >>> smr2 = Spatial_Median_Rate(stl_e,stl_b,stl_w,aw=stl_b)

    Extracting the computed rates through the property r of the Spatial_Median_Rate instance

    >>> smr2.r[:10]
    array([5.77412020e-05, 4.46449551e-05, 5.77412020e-05, 5.77412020e-05,
           4.46449551e-05, 3.61363528e-05, 3.61363528e-05, 4.46449551e-05,
           5.77412020e-05, 4.03987355e-05])

    Recomputing spatial median rates by using the base variable as auxilliary weights
    with 5 iterations

    >>> smr3 = Spatial_Median_Rate(stl_e,stl_b,stl_w,aw=stl_b,iteration=5)

    Extracting the computed rates through the property r of the Spatial_Median_Rate instance

    >>> smr3.r[:10]
    array([3.61363528e-05, 4.46449551e-05, 3.61363528e-05, 3.61363528e-05,
           4.46449551e-05, 3.61363528e-05, 3.61363528e-05, 4.46449551e-05,
           3.61363528e-05, 4.46449551e-05])
    >>>
    """

    def __init__(self, e, b, w, aw=None, iteration=1):
        if not w.id_order_set:
            raise ValueError("w id_order must be set to align with the order of e and b")
        self.r = np.asarray(e).flatten() * 1.0 / np.asarray(b).flatten()
        self.aw = aw
        self.w = w
        while iteration:
            self.__search_median()
            iteration -= 1

    def __search_median(self):
        # One smoothing pass: replace every rate with the (weighted) median
        # over the unit and its neighbors.
        rates, aux, w = self.r, self.aw, self.w
        if aux is None:
            smoothed = [
                np.median(np.append(rates[pos], rates[w.neighbor_offsets[key]]))
                for pos, key in enumerate(w.id_order)
            ]
        else:
            smoothed = []
            for pos, key in enumerate(w.id_order):
                disk = [pos] + list(w.neighbor_offsets[key])
                smoothed.append(weighted_median(rates[disk], aux[disk]))
        self.r = np.asarray(smoothed).reshape(rates.shape)
class Spatial_Filtering(_Smoother):
    """Spatial Filtering

    Parameters
    ----------
    bbox : a list of two lists where each list is a pair of coordinates
           a bounding box for the entire n spatial units
    data : array (n, 2)
           x, y coordinates
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units
    x_grid : integer
             the number of cells on x axis
    y_grid : integer
             the number of cells on y axis
    r : float
        fixed radius of a moving window
    pop : integer
          population threshold to create adaptive moving windows

    Attributes
    ----------
    grid : array (x_grid*y_grid, 2)
           x, y coordinates for grid points
    r : array (x_grid*y_grid, 1)
        rate values for grid points

    Notes
    -----
    No tool is provided to find an optimal value for r or pop.

    Examples
    --------
    Reading data in stl_hom.csv into stl to extract values
    for event and population-at-risk variables

    >>> import libpysal
    >>> stl = libpysal.io.open(libpysal.examples.get_path('stl_hom.csv'), 'r')

    Reading the stl data in the WKT format so that
    we can easily extract polygon centroids

    >>> from libpysal.io.util.wkt import WKTParser
    >>> fromWKT = WKTParser()
    >>> stl.cast('WKT',fromWKT)

    Extracting polygon centroids through iteration

    >>> d = np.array([i.centroid for i in stl[:,0]])

    Specifying the bounding box for the stl_hom data.
    The bbox should includes two points for the left-bottom and the right-top corners

    >>> bbox = [[-92.700676, 36.881809], [-87.916573, 40.3295669]]

    The 11th and 14th columns in stl_hom.csv includes the number of homocides and population.
    Creating two arrays from these columns.

    >>> stl_e, stl_b = np.array(stl[:,10]), np.array(stl[:,13])

    Applying spatial filtering by using a 10*10 mesh grid and a moving window
    with 2 radius

    >>> sf_0 = Spatial_Filtering(bbox,d,stl_e,stl_b,10,10,r=2)

    Extracting the resulting rates through the property r of the Spatial_Filtering instance

    >>> sf_0.r[:10]
    array([4.23561763e-05, 4.45290850e-05, 4.56456221e-05, 4.49133384e-05,
           4.39671835e-05, 4.44903042e-05, 4.19845497e-05, 4.11936548e-05,
           3.93463504e-05, 4.04376345e-05])

    Applying another spatial filtering by allowing the moving window to grow until
    600000 people are found in the window

    >>> sf = Spatial_Filtering(bbox,d,stl_e,stl_b,10,10,pop=600000)

    Checking the size of the reulting array including the rates

    >>> sf.r.shape
    (100,)

    Extracting the resulting rates through the property r of the Spatial_Filtering instance

    >>> sf.r[:10]
    array([3.73728738e-05, 4.04456300e-05, 4.04456300e-05, 3.81035327e-05,
           4.54831940e-05, 4.54831940e-05, 3.75658628e-05, 3.75658628e-05,
           3.75658628e-05, 3.75658628e-05])
    """

    def __init__(self, bbox, data, e, b, x_grid, y_grid, r=None, pop=None):
        e = np.asarray(e).reshape(-1, 1)
        b = np.asarray(b).reshape(-1, 1)
        data_tree = KDTree(data)
        x_range = bbox[1][0] - bbox[0][0]
        y_range = bbox[1][1] - bbox[0][1]
        # regular mesh of x_grid * y_grid evaluation points over the bbox
        x, y = np.mgrid[bbox[0][0]:bbox[1][0]:float(x_range) / x_grid,
                        bbox[0][1]:bbox[1][1]:float(y_range) / y_grid]
        self.grid = list(zip(x.ravel(), y.ravel()))
        self.r = []
        if r is None and pop is None:
            raise ValueError("Either r or pop should not be None")
        if r is not None:
            # fixed-radius window: rate from all points within distance r
            pnts_in_disk = data_tree.query_ball_point(self.grid, r=r)
            for i in pnts_in_disk:
                # BUGFIX: use a distinct name; the original reused `r`,
                # clobbering the radius parameter on the first iteration.
                rate = e[i].sum() * 1.0 / b[i].sum()
                self.r.append(rate)
        if pop is not None:
            # adaptive window: indices of every point ordered by distance
            # from each grid point; grow until the population threshold
            nearest = data_tree.query(self.grid, k=len(e))[1]
            for i in nearest:
                e_n, b_n = e[i].cumsum(), b[i].cumsum()
                b_n_filter = b_n <= pop
                e_n_f, b_n_f = e_n[b_n_filter], b_n[b_n_filter]
                if len(e_n_f) == 0:
                    # even a single nearest point exceeds the threshold;
                    # fall back to that one point
                    e_n_f = e_n[[0]]
                    b_n_f = b_n[[0]]
                self.r.append(e_n_f[-1] * 1.0 / b_n_f[-1])
        self.r = np.array(self.r)

    @_requires('pandas')
    @classmethod
    def by_col(cls, df, e, b, x_grid, y_grid, geom_col='geometry', **kwargs):
        """
        Compute smoothing by columns in a dataframe. The bounding box and point
        information is computed from the geometry column.

        Parameters
        -----------
        df : pandas.DataFrame
             a dataframe containing the data to be smoothed
        e : string or list of strings
            the name or names of columns containing event variables to be
            smoothed
        b : string or list of strings
            the name or names of columns containing the population
            variables to be smoothed
        x_grid : integer
                 number of grid cells to use along the x-axis
        y_grid : integer
                 number of grid cells to use along the y-axis
        geom_col: string
                  the name of the column in the dataframe containing the
                  geometry information.
        **kwargs: optional keyword arguments
                  optional keyword options that are passed directly to the
                  smoother.

        Returns
        ---------
        a new dataframe of dimension (x_grid*y_grid, 3), containing the
        coordinates of the grid cells and the rates associated with those grid
        cells.
        """
        import pandas as pd
        # prep for application over multiple event/population pairs
        if isinstance(e, str):
            e = [e]
        if isinstance(b, str):
            b = [b]
        if len(e) > len(b):
            b = b * len(e)
        if isinstance(x_grid, (int, float)):
            x_grid = [x_grid] * len(e)
        if isinstance(y_grid, (int, float)):
            y_grid = [y_grid] * len(e)
        bbox = get_bounding_box(df[geom_col])
        bbox = [[bbox.left, bbox.lower], [bbox.right, bbox.upper]]
        data = get_points_array(df[geom_col])
        res = []
        for ename, bname, xgi, ygi in zip(e, b, x_grid, y_grid):
            r = cls(bbox, data, df[ename], df[bname], xgi, ygi, **kwargs)
            grid = np.asarray(r.grid).reshape(-1, 2)
            name = '_'.join(('-'.join((ename, bname)), cls.__name__.lower()))
            colnames = ('_'.join((name, suffix)) for suffix in ['X', 'Y', 'R'])
            items = [(cname, col) for cname, col in zip(colnames,
                                                        [grid[:, 0],
                                                         grid[:, 1],
                                                         r.r])]
            # DataFrame.from_items was removed from pandas; an
            # insertion-ordered dict preserves the column order.
            res.append(pd.DataFrame.from_dict(dict(items)))
        outdf = pd.concat(res)
        return outdf
class Headbanging_Triples(object):
    """Generate a pseudo spatial weights instance that contains headbanging triples.

    .. deprecated::
        The constructor raises ``DeprecationWarning`` unconditionally, so this
        class cannot currently be instantiated; the remaining body is retained
        for reference.

    Parameters
    ----------
    data : array (n, 2)
           numpy array of x, y coordinates
    w : spatial weights instance
    k : integer number of nearest neighbors
    t : integer
        the number of triples
    angle : integer between 0 and 180
            the angle criterium for a set of triples
    edgecorr : boolean
               whether or not correction for edge points is made

    Attributes
    ----------
    triples : dictionary
              key is observation record id, value is a list of lists of triple ids
    extra : dictionary
            key is observation record id, value is a list of the following:
            tuple of original triple observations
            distance between original triple observations
            distance between an original triple observation and its extrapolated point

    Examples
    --------
    importing k-nearest neighbor weights creator

    >>> import libpysal # doctest: +SKIP

    Reading data in stl_hom.csv into stl_db to extract values
    for event and population-at-risk variables

    >>> stl_db = libpysal.io.open(libpysal.examples.get_path('stl_hom.csv'),'r') # doctest: +SKIP

    Reading the stl data in the WKT format so that
    we can easily extract polygon centroids

    >>> from libpysal.io.util.wkt import WKTParser # doctest: +SKIP
    >>> fromWKT = WKTParser() # doctest: +SKIP
    >>> stl_db.cast('WKT',fromWKT) # doctest: +SKIP

    Extracting polygon centroids through iteration

    >>> d = np.array([i.centroid for i in stl_db[:,0]]) # doctest: +SKIP

    Using the centroids, we create a 5-nearst neighbor weights

    >>> w = libpysal.weights.KNN(d,k=5) # doctest: +SKIP

    Ensuring that the elements in the spatial weights instance are ordered
    by the order of stl_db's IDs

    >>> if not w.id_order_set: w.id_order = w.id_order # doctest: +SKIP

    Finding headbaning triples by using 5 nearest neighbors

    >>> ht = Headbanging_Triples(d,w,k=5) # doctest: +SKIP

    Checking the members of triples

    >>> for k, item in ht.triples.items()[:5]: print(k, item) # doctest: +SKIP
    0 [(5, 6), (10, 6)]
    1 [(4, 7), (4, 14), (9, 7)]
    2 [(0, 8), (10, 3), (0, 6)]
    3 [(4, 2), (2, 12), (8, 4)]
    4 [(8, 1), (12, 1), (8, 9)]

    Opening sids2.shp file

    >>> import libpysal
    >>> sids = libpysal.io.open(libpysal.examples.get_path('sids2.shp'),'r') # doctest: +SKIP

    Extracting the centroids of polygons in the sids data

    >>> sids_d = np.array([i.centroid for i in sids]) # doctest: +SKIP

    Creating a 5-nearest neighbors weights from the sids centroids

    >>> sids_w = libpysal.weights.KNN(sids_d,k=5) # doctest: +SKIP

    Ensuring that the members in sids_w are ordered by
    the order of sids_d's ID

    >>> if not sids_w.id_order_set: sids_w.id_order = sids_w.id_order # doctest: +SKIP

    Finding headbaning triples by using 5 nearest neighbors

    >>> s_ht = Headbanging_Triples(sids_d,sids_w,k=5) # doctest: +SKIP

    Checking the members of the found triples

    >>> for k, item in s_ht.triples.items()[:5]: print(k, item) # doctest: +SKIP
    0 [(1, 18), (1, 21), (1, 33)]
    1 [(2, 40), (2, 22), (22, 40)]
    2 [(39, 22), (1, 9), (39, 17)]
    3 [(16, 6), (19, 6), (20, 6)]
    4 [(5, 15), (27, 15), (35, 15)]

    Finding headbanging triples by using 5 nearest neighbors with edge correction

    >>> s_ht2 = Headbanging_Triples(sids_d,sids_w,k=5,edgecor=True) # doctest: +SKIP

    Checking the members of the found triples

    >>> for k, item in s_ht2.triples.items()[:5]: print(k, item) # doctest: +SKIP
    0 [(1, 18), (1, 21), (1, 33)]
    1 [(2, 40), (2, 22), (22, 40)]
    2 [(39, 22), (1, 9), (39, 17)]
    3 [(16, 6), (19, 6), (20, 6)]
    4 [(5, 15), (27, 15), (35, 15)]

    Checking the extrapolated point that is introduced into the triples
    during edge correction

    >>> extrapolated = s_ht2.extra[72] # doctest: +SKIP

    Checking the observation IDs constituting the extrapolated triple

    >>> extrapolated[0] # doctest: +SKIP
    (89, 77)

    Checking the distances between the extraploated point and the observation 89 and 77

    >>> round(extrapolated[1],5), round(extrapolated[2],6) # doctest: +SKIP
    (0.33753, 0.302707)
    """
    def __init__(self, data, w, k=5, t=3, angle=135.0, edgecor=False):
        # Deliberate hard deprecation: everything below is unreachable.
        raise DeprecationWarning('Deprecated')
        if k < 3:
            raise ValueError("w should be NeareastNeighbors instance & the number of neighbors should be more than 3.")
        if not w.id_order_set:
            raise ValueError("w id_order must be set to align with the order of data")
        self.triples, points = {}, {}
        # Map each (index, Point) to a dict of its neighbors' Points,
        # keyed by the neighbors' positional offsets.
        for i, pnt in enumerate(data):
            ng = w.neighbor_offsets[i]
            points[(i, Point(pnt))] = dict(list(zip(ng, [Point(d)
                                                         for d in data[ng]])))
        # For each point, keep the neighbor pairs subtending an angle wider
        # than `angle` at the point; retain at most `t` such triples,
        # preferring those whose chord passes closest to the point.
        for i, pnt in list(points.keys()):
            ng = points[(i, pnt)]
            tr, tr_dis = {}, []
            for c in comb(list(ng.keys()), 2):
                p2, p3 = ng[c[0]], ng[c[-1]]
                ang = get_angle_between(Ray(pnt, p2), Ray(pnt, p3))
                if ang > angle or (ang < 0.0 and ang + 360 > angle):
                    tr[tuple(c)] = (p2, p3)
            if len(tr) > t:
                # rank candidate triples by distance from the point to the
                # segment joining the pair, keep the t closest
                for c in list(tr.keys()):
                    p2, p3 = tr[c]
                    tr_dis.append((get_segment_point_dist(
                        LineSegment(p2, p3), pnt), c))
                tr_dis = sorted(tr_dis)[:t]
                self.triples[i] = [trp for dis, trp in tr_dis]
            else:
                self.triples[i] = list(tr.keys())
        if edgecor:
            # Edge correction: points on the convex hull with no triples get
            # one synthetic triple built from an extrapolated point.
            self.extra = {}
            ps = dict([(p, i) for i, p in list(points.keys())])
            chull = convex_hull(list(ps.keys()))
            chull = [p for p in chull if len(self.triples[ps[p]]) == 0]
            for point in chull:
                key = (ps[point], point)
                ng = points[key]
                ng_dist = [(get_points_dist(point, p), p) for p in list(ng.values())]
                # nearest neighbors first (popped from the end below)
                ng_dist_s = sorted(ng_dist, reverse=True)
                extra = None
                while extra is None and len(ng_dist_s) > 0:
                    p2 = ng_dist_s.pop()[-1]
                    p3s = list(ng.values())
                    p3s.remove(p2)
                    for p3 in p3s:
                        dist_p2_p3 = get_points_dist(p2, p3)
                        dist_p_p2 = get_points_dist(point, p2)
                        dist_p_p3 = get_points_dist(point, p3)
                        # anchor the rays at whichever of p2/p3 is closer
                        if dist_p_p2 <= dist_p_p3:
                            ray1, ray2, s_pnt, dist, c = Ray(p2, point), Ray(p2, p3), p2, dist_p_p2, (ps[p2], ps[p3])
                        else:
                            ray1, ray2, s_pnt, dist, c = Ray(p3, point), Ray(p3, p2), p3, dist_p_p3, (ps[p3], ps[p2])
                        ang = get_angle_between(ray1, ray2)
                        if ang >= 90 + angle / 2 or (ang < 0 and ang + 360 >= 90 + angle / 2):
                            ex_point = get_point_at_angle_and_dist(
                                ray1, angle, dist)
                            extra = [c, dist_p2_p3, get_points_dist(
                                s_pnt, ex_point)]
                            break
                # NOTE(review): if no qualifying pair exists, `extra` stays
                # None and the next line would raise TypeError — presumably
                # the geometry guarantees a match; verify before reuse.
                self.triples[ps[point]].append(extra[0])
                self.extra[ps[point]] = extra
class Headbanging_Median_Rate(object):
    """Headbanging Median Rate Smoothing.

    .. deprecated::
        The constructor raises ``DeprecationWarning`` unconditionally, so this
        class cannot currently be instantiated; the remaining body is retained
        for reference.

    Parameters
    ----------
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units
    t : Headbanging_Triples instance
    aw : array (n, 1)
         auxilliary weight variable measured across n spatial units
    iteration : integer
                the number of iterations

    Attributes
    ----------
    r : array (n, 1)
        rate values from headbanging median smoothing

    Examples
    --------
    >>> import libpysal # doctest: +SKIP

    opening the sids2 shapefile

    >>> sids = libpysal.io.open(libpysal.examples.get_path('sids2.shp'), 'r') # doctest: +SKIP

    extracting the centroids of polygons in the sids2 data

    >>> sids_d = np.array([i.centroid for i in sids]) # doctest: +SKIP

    creating a 5-nearest neighbors weights from the centroids

    >>> sids_w = libpysal.weights.KNN(sids_d,k=5) # doctest: +SKIP

    ensuring that the members in sids_w are ordered

    >>> if not sids_w.id_order_set: sids_w.id_order = sids_w.id_order # doctest: +SKIP

    finding headbanging triples by using 5 neighbors

    >>> s_ht = Headbanging_Triples(sids_d,sids_w,k=5) # doctest: +SKIP
    DeprecationWarning: Deprecated

    reading in the sids2 data table

    >>> sids_db = libpysal.io.open(libpysal.examples.get_path('sids2.dbf'), 'r') # doctest: +SKIP

    extracting the 10th and 9th columns in the sids2.dbf and
    using data values as event and population-at-risk variables

    >>> s_e, s_b = np.array(sids_db[:,9]), np.array(sids_db[:,8]) # doctest: +SKIP

    computing headbanging median rates from s_e, s_b, and s_ht

    >>> sids_hb_r = Headbanging_Median_Rate(s_e,s_b,s_ht) # doctest: +SKIP

    extracting the computed rates through the property r of the Headbanging_Median_Rate instance

    >>> sids_hb_r.r[:5] # doctest: +SKIP
    array([ 0.00075586,  0.        ,  0.0008285 ,  0.0018315 ,  0.00498891])

    recomputing headbanging median rates with 5 iterations

    >>> sids_hb_r2 = Headbanging_Median_Rate(s_e,s_b,s_ht,iteration=5) # doctest: +SKIP

    extracting the computed rates through the property r of the Headbanging_Median_Rate instance

    >>> sids_hb_r2.r[:5] # doctest: +SKIP
    array([ 0.0008285 ,  0.00084331,  0.00086896,  0.0018315 ,  0.00498891])

    recomputing headbanging median rates by considring a set of auxilliary weights

    >>> sids_hb_r3 = Headbanging_Median_Rate(s_e,s_b,s_ht,aw=s_b) # doctest: +SKIP

    extracting the computed rates through the property r of the Headbanging_Median_Rate instance

    >>> sids_hb_r3.r[:5] # doctest: +SKIP
    array([ 0.00091659,  0.        ,  0.00156838,  0.0018315 ,  0.00498891])
    """
    def __init__(self, e, b, t, aw=None, iteration=1):
        # Deliberate hard deprecation: everything below is unreachable.
        raise DeprecationWarning('Deprecated')
        self.r = e * 1.0 / b
        self.tr, self.aw = t.triples, aw
        if hasattr(t, 'extra'):
            self.extra = t.extra
        while iteration:
            self.__search_headbanging_median()
            iteration -= 1

    def __get_screens(self, id, triples, weighted=False):
        """Return the screening values (observed rate plus lower/upper screens,
        and weights when ``weighted``) used by the median selection step."""
        r, tr = self.r, self.tr
        if len(triples) == 0:
            return r[id]
        # Edge-corrected point: a single synthetic triple with an
        # extrapolated member.
        if hasattr(self, 'extra') and id in self.extra:
            extra = self.extra
            trp_r = r[list(triples[0])]
            # observed rate
            # plus difference in rate scaled by ratio of extrapolated distance
            # & observed distance.
            trp_r[-1] = trp_r[0] + (trp_r[0] - trp_r[-1]) * (
                extra[id][-1] * 1.0 / extra[id][1])
            trp_r = sorted(trp_r)
            if not weighted:
                return r[id], trp_r[0], trp_r[-1]
            else:
                trp_aw = self.aw[triples[0]]
                # extrapolated weight, scaled the same way as the rate above
                extra_w = trp_aw[0] + (trp_aw[0] - trp_aw[-1]) * (
                    extra[id][-1] * 1.0 / extra[id][1])
                return r[id], trp_r[0], trp_r[-1], self.aw[id], trp_aw[0] + extra_w
        if not weighted:
            # unweighted screens: medians of per-triple minima and maxima
            lowest, highest = [], []
            for trp in triples:
                trp_r = np.sort(r[list(trp)])
                lowest.append(trp_r[0])
                highest.append(trp_r[-1])
            return r[id], np.median(np.array(lowest)), np.median(np.array(highest))
        if weighted:
            # weighted screens: weighted medians of per-triple extremes,
            # carrying along the auxiliary weight of each extreme member
            lowest, highest = [], []
            lowest_aw, highest_aw = [], []
            for trp in triples:
                trp_r = r[list(trp)]
                # structured array pairing each rate with its member id so
                # the sort keeps track of which member supplied the extreme
                dtype = [('r', '%s' % trp_r.dtype), ('w',
                                                     '%s' % self.aw.dtype)]
                trp_r = np.array(list(zip(trp_r, list(trp))), dtype=dtype)
                trp_r.sort(order='r')
                lowest.append(trp_r['r'][0])
                highest.append(trp_r['r'][-1])
                lowest_aw.append(self.aw[int(round(trp_r['w'][0]))])
                highest_aw.append(self.aw[int(round(trp_r['w'][-1]))])
            wm_lowest = weighted_median(np.array(lowest), np.array(lowest_aw))
            wm_highest = weighted_median(
                np.array(highest), np.array(highest_aw))
            triple_members = flatten(triples, unique=False)
            return r[id], wm_lowest, wm_highest, self.aw[id] * len(triples), self.aw[triple_members].sum()

    def __get_median_from_screens(self, screens):
        """Collapse the screening values into a single smoothed rate."""
        # bare float: point had no triples, rate is passed through unchanged
        if isinstance(screens, float):
            return screens
        elif len(screens) == 3:
            return np.median(np.array(screens))
        elif len(screens) == 5:
            # weighted case: clamp the observed rate to the weighted screens
            # only when the unit's own weight is outweighed by the triples'
            rk, wm_lowest, wm_highest, w1, w2 = screens
            if rk >= wm_lowest and rk <= wm_highest:
                return rk
            elif rk < wm_lowest and w1 < w2:
                return wm_lowest
            elif rk > wm_highest and w1 < w2:
                return wm_highest
            else:
                return rk

    def __search_headbanging_median(self):
        """One smoothing pass: recompute every unit's rate from its screens."""
        r, tr = self.r, self.tr
        new_r = []
        for k in list(tr.keys()):
            screens = self.__get_screens(
                k, tr[k], weighted=(self.aw is not None))
            new_r.append(self.__get_median_from_screens(screens))
        self.r = np.array(new_r)

    @_requires('pandas')
    @classmethod
    def by_col(cls, df, e, b, t=None, geom_col='geometry', inplace=False, **kwargs):
        """
        Compute smoothing by columns in a dataframe. The bounding box and point
        information is computed from the geometry column.

        Parameters
        -----------
        df : pandas.DataFrame
             a dataframe containing the data to be smoothed
        e : string or list of strings
            the name or names of columns containing event variables to be
            smoothed
        b : string or list of strings
            the name or names of columns containing the population
            variables to be smoothed
        t : Headbanging_Triples instance or list of Headbanging_Triples
            list of headbanging triples instances. If not provided, this
            is computed from the geometry column of the dataframe.
        geom_col: string
                  the name of the column in the dataframe containing the
                  geometry information.
        inplace : bool
                  a flag denoting whether to output a copy of `df` with the
                  relevant smoothed columns appended, or to append the columns
                  directly to `df` itself.
        **kwargs: optional keyword arguments
                  optional keyword options that are passed directly to the
                  smoother.

        Returns
        ---------
        a new dataframe containing the smoothed Headbanging Median Rates for the
        event/population pairs. If done inplace, there is no return value and
        `df` is modified in place.
        """
        import pandas as pd
        if not inplace:
            # work on a copy, recurse in-place, and hand the copy back
            new = df.copy()
            cls.by_col(new, e, b, t=t, geom_col=geom_col, inplace=True, **kwargs)
            return new
        import pandas as pd
        # prep for application over multiple event/population pairs
        if isinstance(e, str):
            e = [e]
        if isinstance(b, str):
            b = [b]
        if len(e) > len(b):
            b = b * len(e)
        data = get_points_array(df[geom_col])
        #Headbanging_Triples doesn't take **kwargs, so filter its arguments
        # (self, data, w, k=5, t=3, angle=135.0, edgecor=False):
        w = kwargs.pop('w', None)
        if w is None:
            found = False
            for k in df._metadata:
                # NOTE(review): this looks like it should be
                # df.__dict__.get(k, None), and the loop lacks a `break`
                # after a hit (compare Age_Adjusted_Smoother.by_col) —
                # as written `w` is looked up by a None/overwritten key.
                # Confirm before relying on metadata-attached weights.
                w = df.__dict__.get(w, None)
                if isinstance(w, W):
                    found = True
            if not found:
                raise Exception('Weights not provided and no weights attached to frame!'
                                ' Please provide a weight or attach a weight to the'
                                ' dataframe')
        k = kwargs.pop('k', 5)
        t = kwargs.pop('t', 3)
        angle = kwargs.pop('angle', 135.0)
        edgecor = kwargs.pop('edgecor', False)
        hbt = Headbanging_Triples(data, w, k=k, t=t, angle=angle,
                                  edgecor=edgecor)
        res = []
        for ename, bname in zip(e, b):
            r = cls(df[ename], df[bname], hbt, **kwargs).r
            name = '_'.join(('-'.join((ename, bname)), cls.__name__.lower()))
            df[name] = r
|
def sum_by_n(d, w, n):
    """A utility function to summarize a data array into n values
    after weighting the array with another weight array w

    Parameters
    ----------
    d : array
        (t, 1), numerical values
    w : array
        (t, 1), numerical values for weighting
    n : integer
        the number of groups
        t = c*n (c is a constant)

    Returns
    -------
      : array
        (n, 1), an array with summarized values

    Examples
    --------
    Creating an array including four integers.
    We will compute weighted means for every two elements.

    >>> d = np.array([10, 9, 20, 30])

    Here is another array with the weight values for d's elements.

    >>> w = np.array([0.5, 0.1, 0.3, 0.8])

    We specify the number of groups for which the weighted mean is computed.

    >>> n = 2

    Applying sum_by_n function

    >>> sum_by_n(d, w, n)
    array([ 5.9, 30. ])
    """
    t = len(d)
    h = t // n  # group size; must be floor division
    d = d * w
    return np.array([sum(d[i: i + h]) for i in range(0, t, h)])
"""
Apply smoothing to rate computation
Provides a collection of rate smoothers (excess risk, empirical Bayes,
spatial filtering, headbanging) together with age-standardization
utilities for computing smoothed rates over spatial units.
Author(s):
Myunghwa Hwang mhwang4@gmail.com
David Folch dfolch@asu.edu
Luc Anselin luc.anselin@asu.edu
Serge Rey srey@asu.edu
"""
__author__ = "Myunghwa Hwang <mhwang4@gmail.com>, David Folch <dfolch@asu.edu>, Luc Anselin <luc.anselin@asu.edu>, Serge Rey <srey@asu.edu"
from libpysal.weights.weights import W
from libpysal.weights.distance import Kernel
from libpysal.weights.util import get_points_array, comb
from libpysal.cg import Point, Ray, LineSegment
from libpysal.cg import get_angle_between, get_points_dist, get_segment_point_dist,\
get_point_at_angle_and_dist, convex_hull, get_bounding_box
from libpysal.common import np, KDTree, requires as _requires
from libpysal.weights.spatial_lag import lag_spatial as slag
from scipy.stats import gamma, norm, chi2, poisson
from functools import reduce
import doctest
__all__ = ['Excess_Risk', 'Empirical_Bayes', 'Spatial_Empirical_Bayes', 'Spatial_Rate', 'Kernel_Smoother', 'Age_Adjusted_Smoother', 'Disk_Smoother', 'Spatial_Median_Rate', 'Spatial_Filtering', 'Headbanging_Triples', 'Headbanging_Median_Rate', 'flatten', 'weighted_median', 'sum_by_n', 'crude_age_standardization', 'direct_age_standardization', 'indirect_age_standardization', 'standardized_mortality_ratio', 'choynowski', 'assuncao_rate']
def flatten(l, unique=True):
    """flatten a list of lists

    Parameters
    ----------
    l : list
        of lists
    unique : boolean
             whether or not only unique items are wanted (default=True)

    Returns
    -------
    list
        of single items

    Examples
    --------
    Creating a sample list whose elements are lists of integers

    >>> l = [[1, 2], [3, 4, ], [5, 6]]

    Applying flatten function

    >>> flatten(l)
    [1, 2, 3, 4, 5, 6]
    """
    # A comprehension is O(n), whereas repeated concatenation via
    # reduce(lambda x, y: x + y, l) is O(n**2); it also handles an empty
    # outer list, which reduce without an initializer rejects (TypeError).
    flat = [item for sub in l for item in sub]
    if not unique:
        return flat
    return list(set(flat))
def weighted_median(d, w):
    """Find the median of d, weighted by w.

    Parameters
    ----------
    d : array
        (n, 1), variable for which median will be found
    w : array
        (n, 1), variable on which d's median will be decided

    Notes
    -----
    d and w are arranged in the same order

    Returns
    -------
    float
        median of d

    Examples
    --------
    Creating an array including five integers.
    We will get the median of these integers.

    >>> d = np.array([5,4,3,1,2])

    Creating another array including weight values for the above integers.
    The median of d will be decided with a consideration to these weight
    values.

    >>> w = np.array([10, 22, 9, 2, 5])

    Applying weighted_median function

    >>> weighted_median(d, w)
    4
    """
    # Pair every weight with its value in a structured array so a single
    # sort on the values keeps the weights aligned.
    rec_dtype = [('w', '%s' % w.dtype), ('v', '%s' % d.dtype)]
    pairs = np.array(list(zip(w, d)), dtype=rec_dtype)
    pairs.sort(order='v')
    cum_w = pairs['w'].cumsum()
    half = cum_w[-1] * 1.0 / 2
    # first position where the cumulative weight reaches half of the total
    idx = (cum_w >= half).nonzero()[0][0]
    ordered = np.sort(d)
    if cum_w[idx] == half and idx < len(d) - 1:
        # exact split: average the value with its successor
        return ordered[idx:idx + 2].mean()
    return ordered[idx]
def crude_age_standardization(e, b, n):
    """A utility function to compute rate through crude age standardization

    Parameters
    ----------
    e : array
        (n*h, 1), event variable measured for each age group across n spatial units
    b : array
        (n*h, 1), population at risk variable measured for each age group across n spatial units
    n : integer
        the number of spatial units

    Notes
    -----
    e and b are arranged in the same order

    Returns
    -------
      : array
        (n, 1), age standardized rate

    Examples
    --------
    Creating an array of an event variable (e.g., the number of cancer patients)
    for 2 regions in each of which 4 age groups are available.
    The first 4 values are event values for 4 age groups in the region 1,
    and the next 4 values are for 4 age groups in the region 2.

    >>> e = np.array([30, 25, 25, 15, 33, 21, 30, 20])

    Creating another array of a population-at-risk variable (e.g., total population)
    for the same two regions.
    The order for entering values is the same as the case of e.

    >>> b = np.array([100, 100, 110, 90, 100, 90, 110, 90])

    Specifying the number of regions.

    >>> n = 2

    Applying crude_age_standardization function to e and b

    >>> crude_age_standardization(e, b, n)
    array([0.2375    , 0.26666667])
    """
    # per-age-group rates
    rates = e * 1.0 / b
    # total population of each spatial unit
    pop_per_unit = sum_by_n(b, 1.0, n)
    # each age group's share of its unit's population
    groups_per_unit = len(e) // n
    age_weights = b * 1.0 / pop_per_unit.repeat(groups_per_unit)
    # weighted sum of the group rates within each unit
    return sum_by_n(rates, age_weights, n)
def direct_age_standardization(e, b, s, n, alpha=0.05):
    """Compute age-standardized rates through direct age standardization.

    Parameters
    ----------
    e : array
        (n*h, 1), event counts for each of h age groups across n spatial
        units
    b : array
        (n*h, 1), populations at risk for each age group across n spatial
        units
    s : array
        (n*h, 1), standard population for each age group across n spatial
        units
    n : integer
        the number of spatial units
    alpha : float
        significance level for the confidence intervals

    Notes
    -----
    e, b, and s must be arranged in the same order.

    Returns
    -------
    list
        a list of n tuples; each tuple holds an age-standardized rate and
        its lower and upper confidence limits

    Examples
    --------
    >>> e = np.array([30, 25, 25, 15, 33, 21, 30, 20])
    >>> b = np.array([1000, 1000, 1100, 900, 1000, 900, 1100, 900])
    >>> s = np.array([1000, 900, 1000, 900, 1000, 900, 1000, 900])
    >>> n = 2
    >>> a, b = [i[0] for i in direct_age_standardization(e, b, s, n)]
    >>> round(a, 4)
    0.0237
    >>> round(b, 4)
    0.0267
    """
    # weight: inverse population times each unit's share of the standard
    # population within the corresponding age group
    age_weight = (1.0 / b) * (s * 1.0 / sum_by_n(s, 1.0, n).repeat(len(s) // n))
    adjusted_r = sum_by_n(e, age_weight, n)
    var_estimate = sum_by_n(e, np.square(age_weight), n)
    # gamma-distribution parameters for the interval approximation
    g_a = np.square(adjusted_r) / var_estimate
    g_b = var_estimate / adjusted_r
    # per-unit maximum age weight, used to widen the upper-limit parameters
    k = [age_weight[i:i + len(b) // n].max() for i in range(0, len(b),
                                                            len(b) // n)]
    g_a_k = np.square(adjusted_r + k) / (var_estimate + np.square(k))
    g_b_k = (var_estimate + np.square(k)) / (adjusted_r + k)
    res = []
    for i in range(len(adjusted_r)):
        if adjusted_r[i] == 0:
            # Exact Poisson upper bound for a zero count:
            # 0.5 * chi2.ppf(1 - alpha/2, df=2).  BUG FIX: the previous
            # call omitted the mandatory df argument, so any region with a
            # zero adjusted rate raised a TypeError instead of yielding an
            # interval.
            upper = 0.5 * chi2.ppf(1 - 0.5 * alpha, 2)
            lower = 0.0
        else:
            lower = gamma.ppf(0.5 * alpha, g_a[i], scale=g_b[i])
            upper = gamma.ppf(1 - 0.5 * alpha, g_a_k[i], scale=g_b_k[i])
        res.append((adjusted_r[i], lower, upper))
    return res
def indirect_age_standardization(e, b, s_e, s_b, n, alpha=0.05):
    """Compute age-standardized rates through indirect age standardization.

    Parameters
    ----------
    e : array
        (n*h, 1), event counts for each of h age groups across n spatial
        units
    b : array
        (n*h, 1), populations at risk for each age group across n spatial
        units
    s_e : array
        (n*h, 1), event counts for each age group in the standard population
    s_b : array
        (n*h, 1), populations for each age group in the standard population
    n : integer
        the number of spatial units
    alpha : float
        significance level for the confidence intervals

    Notes
    -----
    e, b, s_e, and s_b must be arranged in the same order.

    Returns
    -------
    list
        a list of n tuples; each tuple holds an age-standardized rate and
        its lower and upper confidence limits

    Examples
    --------
    >>> e = np.array([30, 25, 25, 15, 33, 21, 30, 20])
    >>> b = np.array([100, 100, 110, 90, 100, 90, 110, 90])
    >>> s_e = np.array([100, 45, 120, 100, 50, 30, 200, 80])
    >>> s_b = np.array([1000, 900, 1000, 900, 1000, 900, 1000, 900])
    >>> n = 2
    >>> [i[0] for i in indirect_age_standardization(e, b, s_e, s_b, n)]
    [0.23723821989528798, 0.2610803324099723]
    """
    smr = standardized_mortality_ratio(e, b, s_e, s_b, n)
    # overall crude rate in the standard population
    standard_rate = sum(s_e * 1.0) / sum(s_b * 1.0)
    adjusted = standard_rate * smr
    # CI built on the log scale: sd of log(SMR) is 1/sqrt(observed events)
    observed = sum_by_n(e, 1.0, n)
    log_sd = 1.0 / np.sqrt(observed)
    z = norm.ppf(1 - 0.5 * alpha)
    log_smr = np.log(smr)
    lower = np.exp(log_smr - z * log_sd) * standard_rate
    upper = np.exp(log_smr + z * log_sd) * standard_rate
    return list(zip(adjusted, lower, upper))
def standardized_mortality_ratio(e, b, s_e, s_b, n):
    """Compute the standardized mortality ratio (SMR) for each spatial unit.

    Parameters
    ----------
    e : array
        (n*h, 1), event counts for each of h age groups across n spatial
        units
    b : array
        (n*h, 1), populations at risk for each age group across n spatial
        units
    s_e : array
        (n*h, 1), event counts for each age group in the standard population
    s_b : array
        (n*h, 1), populations for each age group in the standard population
    n : integer
        the number of spatial units

    Notes
    -----
    e, b, s_e, and s_b must be arranged in the same order.

    Returns
    -------
    array
        (n, 1), observed/expected event ratios

    Examples
    --------
    >>> e = np.array([30, 25, 25, 15, 33, 21, 30, 20])
    >>> b = np.array([100, 100, 110, 90, 100, 90, 110, 90])
    >>> s_e = np.array([100, 45, 120, 100, 50, 30, 200, 80])
    >>> s_b = np.array([1000, 900, 1000, 900, 1000, 900, 1000, 900])
    >>> n = 2
    >>> a, b = standardized_mortality_ratio(e, b, s_e, s_b, n)
    >>> round(a, 4)
    2.4869
    >>> round(b, 4)
    2.7368
    """
    # age-specific rates in the standard population
    standard_rates = s_e * 1.0 / s_b
    observed = sum_by_n(e, 1.0, n)
    # events expected if each unit experienced the standard rates
    expected = sum_by_n(b, standard_rates, n)
    return observed * 1.0 / expected
def choynowski(e, b, n, threshold=None):
    """Choynowski map probabilities [Choynowski1959]_ .

    Parameters
    ----------
    e : array (n*h, 1)
        event counts for each age group across n spatial units
    b : array (n*h, 1)
        populations at risk for each age group across n spatial units
    n : integer
        the number of spatial units
    threshold : float
        p-values greater than or equal to this value are replaced by zero

    Notes
    -----
    e and b must be arranged in the same order.

    Returns
    -------
    array
        (n, 1), Poisson tail probabilities per spatial unit

    Examples
    --------
    >>> e = np.array([30, 25, 25, 15, 33, 21, 30, 20])
    >>> b = np.array([100, 100, 110, 90, 100, 90, 110, 90])
    >>> n = 2
    >>> a,b = choynowski(e, b, n)
    >>> round(a, 3)
    0.304
    >>> round(b, 3)
    0.294
    """
    observed = sum_by_n(e, 1.0, n)
    at_risk = sum_by_n(b, 1.0, n)
    # region-wide rate, applied to each unit's population for the expectation
    overall_rate = sum(observed) * 1.0 / sum(at_risk)
    expected = overall_rate * at_risk
    probs = []
    for obs, exp in zip(observed, expected):
        if obs <= exp:
            # lower-tail probability of observing <= obs events
            probs.append(poisson.cdf(obs, exp))
        else:
            # upper-tail probability of observing >= obs events
            probs.append(1 - poisson.cdf(obs - 1, exp))
    if threshold:
        probs = [p if p < threshold else 0.0 for p in probs]
    return np.array(probs)
def assuncao_rate(e, b):
    """Standardized rates using Empirical Bayes moments.

    The rates are standardized with the mean and standard deviation of
    Empirical Bayes rate estimates; the result is used to compute Moran's
    I corrected for rate variables [Choynowski1959]_ .

    Parameters
    ----------
    e : array(n, 1)
        event counts measured at n spatial units
    b : array(n, 1)
        populations at risk measured at n spatial units

    Notes
    -----
    e and b must be arranged in the same order.

    Returns
    -------
    array
        (n, 1), standardized rates

    Examples
    --------
    >>> e = np.array([30, 25, 25, 15, 33, 21, 30, 20])
    >>> b = np.array([100, 100, 110, 90, 100, 90, 110, 90])
    >>> assuncao_rate(e, b)[:4]
    array([ 1.03843594, -0.04099089, -0.56250375, -1.73061861])
    """
    raw_rate = e * 1.0 / b
    total_e, total_b = sum(e), sum(b)
    # moment estimate of the prior mean (overall rate)
    prior_mean = total_e * 1.0 / total_b
    # population-weighted variance of the raw rates around the prior mean
    weighted_var = sum(b * ((raw_rate - prior_mean) ** 2)) / total_b
    # moment estimate of the prior variance component
    prior_var = weighted_var - prior_mean / (float(total_b) / len(e))
    # per-unit EB variance used for standardization
    eb_var = prior_var + prior_mean / b
    return (raw_rate - prior_mean) / np.sqrt(eb_var)
class _Smoother(object):
    """Base class collecting behavior shared by all aspatial smoothers.

    The only shared piece right now is the ``by_col`` helper; most of
    these smoothers are effectively an ``__init__`` plus one attribute,
    and could be functions (headbanging triples aside).
    """

    def __init__(self):
        pass

    @classmethod
    def by_col(cls, df, e, b, inplace=False, **kwargs):
        """Compute smoothing by columns in a dataframe.

        Parameters
        ----------
        df : pandas.DataFrame
            dataframe containing the data to be smoothed
        e : string or list of strings
            name(s) of the column(s) holding the event variable(s)
        b : string or list of strings
            name(s) of the column(s) holding the population-at-risk
            variable(s)
        inplace : bool
            if True, append the smoothed columns directly to ``df``;
            otherwise operate on (and return) a copy of ``df``
        **kwargs : optional keyword arguments
            passed through to the smoother constructor

        Returns
        -------
        a copy of ``df`` with the smoothed columns appended, or None when
        ``inplace`` (the columns are added to ``df`` itself).
        """
        if not inplace:
            out = df.copy()
            cls.by_col(out, e, b, inplace=True, **kwargs)
            return out
        e = [e] if isinstance(e, str) else e
        b = [b] if isinstance(b, str) else b
        # a single population column may serve every event column
        if len(b) == 1 and len(e) > 1:
            b = b * len(e)
        if len(e) != len(b):
            raise ValueError('There is no one-to-one mapping between event'
                             ' variable and population at risk variable!')
        for ename, bname in zip(e, b):
            events = df[ename]
            pop = df[bname]
            # e.g. "deaths-pop_excess_risk"
            outcol = '_'.join(('-'.join((ename, bname)), cls.__name__.lower()))
            df[outcol] = cls(events, pop, **kwargs).r
class Excess_Risk(_Smoother):
    """Excess Risk

    Parameters
    ----------
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units

    Attributes
    ----------
    r : array (n, 1)
        excess risk values (ratio of each unit's rate to the overall rate)

    Examples
    --------
    Reading data in stl_hom.csv into stl to extract values
    for event and population-at-risk variables

    >>> import libpysal
    >>> stl = libpysal.io.open(libpysal.examples.get_path('stl_hom.csv'), 'r')
    >>> stl_e, stl_b = np.array(stl[:,10]), np.array(stl[:,13])
    >>> er = Excess_Risk(stl_e, stl_b)
    >>> er.r[:10]
    array([[0.20665681],
           [0.43613787],
           [0.42078261],
           [0.22066928],
           [0.57981596],
           [0.35301709],
           [0.56407549],
           [0.17020994],
           [0.3052372 ],
           [0.25821905]])
    """

    def __init__(self, e, b):
        events = np.asarray(e).reshape(-1, 1)
        pop = np.asarray(b).reshape(-1, 1)
        # overall (region-wide) rate
        overall_rate = events.sum() * 1.0 / pop.sum()
        # observed rate relative to what the overall rate would predict
        self.r = events * 1.0 / (pop * overall_rate)
class Empirical_Bayes(_Smoother):
    """Aspatial Empirical Bayes Smoothing

    Parameters
    ----------
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units

    Attributes
    ----------
    r : array (n, 1)
        rate values from Empirical Bayes Smoothing

    Examples
    --------
    Reading data in stl_hom.csv into stl to extract values
    for event and population-at-risk variables

    >>> import libpysal
    >>> stl = libpysal.io.open(libpysal.examples.get_path('stl_hom.csv'), 'r')
    >>> stl_e, stl_b = np.array(stl[:,10]), np.array(stl[:,13])
    >>> eb = Empirical_Bayes(stl_e, stl_b)
    >>> eb.r[:10]
    array([[2.36718950e-05],
           [4.54539167e-05],
           [4.78114019e-05],
           [2.76907146e-05],
           [6.58989323e-05],
           [3.66494122e-05],
           [5.79952721e-05],
           [2.03064590e-05],
           [3.31152999e-05],
           [3.02748380e-05]])
    """

    def __init__(self, e, b):
        e = np.asarray(e).reshape(-1, 1)
        b = np.asarray(b).reshape(-1, 1)
        total_e = e.sum() * 1.0
        total_b = b.sum() * 1.0
        # prior mean: the overall rate
        prior_mean = total_e / total_b
        raw = e * 1.0 / b
        deviation = raw - prior_mean
        # moment estimate of the prior variance
        prior_var = ((b * deviation * deviation).sum() * 1.0 / total_b
                     - prior_mean * 1.0 / b.mean())
        # shrinkage factor: large populations keep their raw rate,
        # small populations are pulled toward the prior mean
        shrink = prior_var / (prior_var + prior_mean / b)
        self.r = shrink * raw + (1.0 - shrink) * prior_mean
class _Spatial_Smoother(_Smoother):
    """Helper base class for smoothers that require a spatial weights object.

    The only shared piece is a weights-aware ``by_col``; most of these
    smoothers are effectively an ``__init__`` plus one attribute.
    """

    def __init__(self):
        pass

    @classmethod
    def by_col(cls, df, e, b, w=None, inplace=False, **kwargs):
        """Compute smoothing by columns in a dataframe.

        Parameters
        ----------
        df : pandas.DataFrame
            dataframe containing the data to be smoothed
        e : string or list of strings
            name(s) of the column(s) holding the event variable(s)
        b : string or list of strings
            name(s) of the column(s) holding the population-at-risk
            variable(s)
        w : pysal.weights.W or list of pysal.weights.W
            spatial weights object(s) for the event-population pairs.
            If not provided, a weights object attached to the dataframe's
            metadata is used.
        inplace : bool
            if True, append the smoothed columns directly to ``df``;
            otherwise operate on (and return) a copy of ``df``
        **kwargs : optional keyword arguments
            passed through to the smoother constructor

        Returns
        -------
        a copy of ``df`` with the smoothed columns appended, or None when
        ``inplace`` (the columns are added to ``df`` itself).
        """
        if not inplace:
            new = df.copy()
            cls.by_col(new, e, b, w=w, inplace=True, **kwargs)
            return new
        if isinstance(e, str):
            e = [e]
        if isinstance(b, str):
            b = [b]
        if w is None:
            found = False
            for k in df._metadata:
                # BUG FIX: the original called df.__dict__.get(w, None),
                # looking up the (None) value of ``w`` instead of the
                # metadata key ``k``, and never broke out of the loop —
                # so an attached weights object could never be found.
                w = df.__dict__.get(k, None)
                if isinstance(w, W):
                    found = True
                    break
            if not found:
                raise Exception('Weights not provided and no weights attached to frame!'
                                ' Please provide a weight or attach a weight to the'
                                ' dataframe')
        if isinstance(w, W):
            # one weights object serves every event-population pair
            w = [w] * len(e)
        if len(b) == 1 and len(e) > 1:
            b = b * len(e)
        try:
            assert len(e) == len(b)
        except AssertionError:
            raise ValueError('There is no one-to-one mapping between event'
                             ' variable and population at risk variable!')
        for ei, bi, wi in zip(e, b, w):
            ename = ei
            bname = bi
            ei = df[ename]
            bi = df[bname]
            outcol = '_'.join(('-'.join((ename, bname)), cls.__name__.lower()))
            df[outcol] = cls(ei, bi, w=wi, **kwargs).r
class Spatial_Empirical_Bayes(_Spatial_Smoother):
    """Spatial Empirical Bayes Smoothing.

    Shrinks each unit's raw rate toward a *local* (neighborhood) reference
    rate, rather than the global rate used by aspatial EB smoothing.

    Parameters
    ----------
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units
    w : spatial weights instance
        its id_order must align with the order of e and b

    Attributes
    ----------
    r : array (n, 1)
        rate values from Empirical Bayes Smoothing

    Examples
    --------
    Reading data in stl_hom.csv into stl to extract values
    for event and population-at-risk variables

    >>> import libpysal
    >>> stl = libpysal.io.open(libpysal.examples.get_path('stl_hom.csv'), 'r')
    >>> stl_e, stl_b = np.array(stl[:,10]), np.array(stl[:,13])
    >>> stl_w = libpysal.io.open(libpysal.examples.get_path('stl.gal'), 'r').read()
    >>> if not stl_w.id_order_set: stl_w.id_order = range(1,len(stl) + 1)
    >>> from esda.smoothing import Spatial_Empirical_Bayes
    >>> s_eb = Spatial_Empirical_Bayes(stl_e, stl_b, stl_w)
    >>> s_eb.r[:10]
    array([[4.01485749e-05],
           [3.62437513e-05],
           [4.93034844e-05],
           [5.09387329e-05],
           [3.72735210e-05],
           [3.69333797e-05],
           [5.40245456e-05],
           [2.99806055e-05],
           [3.73034109e-05],
           [3.47270722e-05]])
    """
    def __init__(self, e, b, w):
        if not w.id_order_set:
            raise ValueError("w id_order must be set to align with the order of e an b")
        e = np.asarray(e).reshape(-1,1)
        b = np.asarray(b).reshape(-1,1)
        # local prior mean: the spatial-rate-smoothed rate at each unit
        r_mean = Spatial_Rate(e, b, w).r
        rate = e * 1.0 / b
        # per-unit variance numerators and neighborhood sizes, filled below
        r_var_left = np.ones_like(e) * 1.
        ngh_num = np.ones_like(e)
        # neighborhood population: neighbors' total plus the unit's own
        bi = slag(w, b) + b
        for i, idv in enumerate(w.id_order):
            # neighborhood = the unit's neighbors plus the unit itself
            ngh = list(w[idv].keys()) + [idv]
            nghi = [w.id2i[k] for k in ngh]
            ngh_num[i] = len(nghi)
            # population-weighted squared deviation of neighborhood rates
            # from the local prior mean
            v = sum(np.square(rate[nghi] - r_mean[i]) * b[nghi])
            r_var_left[i] = v
        r_var_left = r_var_left / bi
        r_var_right = r_mean / (bi / ngh_num)
        # moment estimate of the local prior variance; clamped at zero
        r_var = r_var_left - r_var_right
        r_var[r_var < 0] = 0.0
        # shrink the raw rate toward the local mean in proportion to how
        # much of the total variance is prior (between-unit) variance
        self.r = r_mean + (rate - r_mean) * (r_var / (r_var + (r_mean / b)))
class Spatial_Rate(_Spatial_Smoother):
    """Spatial Rate Smoothing

    Parameters
    ----------
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units
    w : spatial weights instance

    Attributes
    ----------
    r : array (n, 1)
        rate values from spatial rate smoothing

    Examples
    --------
    Reading data in stl_hom.csv into stl to extract values
    for event and population-at-risk variables

    >>> import libpysal
    >>> stl = libpysal.io.open(libpysal.examples.get_path('stl_hom.csv'), 'r')
    >>> stl_e, stl_b = np.array(stl[:,10]), np.array(stl[:,13])
    >>> stl_w = libpysal.io.open(libpysal.examples.get_path('stl.gal'), 'r').read()
    >>> if not stl_w.id_order_set: stl_w.id_order = range(1,len(stl) + 1)
    >>> from esda.smoothing import Spatial_Rate
    >>> sr = Spatial_Rate(stl_e,stl_b,stl_w)
    >>> sr.r[:10]
    array([[4.59326407e-05],
           [3.62437513e-05],
           [4.98677081e-05],
           [5.09387329e-05],
           [3.72735210e-05],
           [4.01073093e-05],
           [3.79372794e-05],
           [3.27019246e-05],
           [4.26204928e-05],
           [3.47270722e-05]])
    """

    def __init__(self, e, b, w):
        if not w.id_order_set:
            raise ValueError("w id_order must be set to align with the order of e and b")
        events = np.asarray(e).reshape(-1, 1)
        pop = np.asarray(b).reshape(-1, 1)
        # binary weights so the lags are plain neighborhood sums
        w.transform = 'b'
        lagged_events = slag(w, events)
        lagged_pop = slag(w, pop)
        # neighborhood rate: (own + neighbors' events) over
        # (own + neighbors' population)
        self.r = (events + lagged_events) / (pop + lagged_pop)
        # restore the default transform before returning
        w.transform = 'o'
class Kernel_Smoother(_Spatial_Smoother):
    """Kernel smoothing

    Parameters
    ----------
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units
    w : Kernel weights instance

    Attributes
    ----------
    r : array (n, 1)
        rate values from kernel smoothing

    Raises
    ------
    ValueError
        if ``w`` is not a Kernel weights instance, or its id_order is not
        set

    Examples
    --------
    >>> e = np.array([10, 1, 3, 4, 2, 5])
    >>> b = np.array([100, 15, 20, 20, 80, 90])
    >>> points=[(10, 10), (20, 10), (40, 10), (15, 20), (30, 20), (30, 30)]
    >>> kw=Kernel(points)
    >>> if not kw.id_order_set: kw.id_order = range(0,len(points))
    >>> kr = Kernel_Smoother(e, b, kw)
    >>> kr.r
    array([[0.10543301],
           [0.0858573 ],
           [0.08256196],
           [0.09884584],
           [0.04756872],
           [0.04845298]])
    """

    def __init__(self, e, b, w):
        # BUG FIX: the original raised the undefined name ``Error`` here,
        # which surfaced as a NameError; ValueError carries the intent.
        # isinstance also accepts Kernel subclasses, which type(w) != Kernel
        # rejected.
        if not isinstance(w, Kernel):
            raise ValueError('w must be an instance of Kernel weights')
        if not w.id_order_set:
            raise ValueError("w id_order must be set to align with the order of e and b")
        e = np.asarray(e).reshape(-1, 1)
        b = np.asarray(b).reshape(-1, 1)
        # kernel-weighted sums of events and population
        w_e, w_b = slag(w, e), slag(w, b)
        self.r = w_e / w_b
class Age_Adjusted_Smoother(_Spatial_Smoother):
    """Age-adjusted rate smoothing

    Parameters
    ----------
    e : array (n*h, 1)
        event variable measured for each age group across n spatial units
    b : array (n*h, 1)
        population at risk variable measured for each age group across n
        spatial units
    w : spatial weights instance
    s : array (n*h, 1)
        standard population for each age group across n spatial units

    Attributes
    ----------
    r : array (n, 1)
        rate values from spatial rate smoothing

    Notes
    -----
    Weights used to smooth age-specific events and populations are simple
    binary weights.

    Examples
    --------
    >>> e = np.array([10, 8, 1, 4, 3, 5, 4, 3, 2, 1, 5, 3])
    >>> b = np.array([100, 90, 15, 30, 25, 20, 30, 20, 80, 80, 90, 60])
    >>> s = np.array([98, 88, 15, 29, 20, 23, 33, 25, 76, 80, 89, 66])
    >>> points=[(10, 10), (20, 10), (40, 10), (15, 20), (30, 20), (30, 30)]
    >>> kw=Kernel(points)
    >>> if not kw.id_order_set: kw.id_order = range(0,len(points))
    >>> ar = Age_Adjusted_Smoother(e, b, kw, s)
    >>> ar.r
    array([0.10519625, 0.08494318, 0.06440072, 0.06898604, 0.06952076,
           0.05020968])
    """

    def __init__(self, e, b, w, s, alpha=0.05):
        e = np.asarray(e).reshape(-1, 1)
        b = np.asarray(b).reshape(-1, 1)
        s = np.asarray(s).flatten()
        t = len(e)
        h = t // w.n  # number of age groups per spatial unit
        w.transform = 'b'  # binary weights: lags become neighborhood sums
        e_n, b_n = [], []
        # spatially lag each age stratum separately (stride h through the
        # unit-major arrays)
        for i in range(h):
            e_n.append(slag(w, e[i::h]).tolist())
            b_n.append(slag(w, b[i::h]).tolist())
        # interleave the strata back into unit-major order
        e_n = np.array(e_n).reshape((1, t), order='F')[0]
        b_n = np.array(b_n).reshape((1, t), order='F')[0]
        e_n = e_n.reshape(s.shape)
        b_n = b_n.reshape(s.shape)
        r = direct_age_standardization(e_n, b_n, s, w.n, alpha=alpha)
        self.r = np.array([i[0] for i in r])
        w.transform = 'o'  # restore the default transform

    @_requires('pandas')
    @classmethod
    def by_col(cls, df, e, b, w=None, s=None, **kwargs):
        """Compute smoothing by columns in a dataframe.

        Parameters
        ----------
        df : pandas.DataFrame
            dataframe containing the data to be smoothed
        e : string or list of strings
            name(s) of the column(s) holding the event variable(s)
        b : string or list of strings
            name(s) of the column(s) holding the population-at-risk
            variable(s)
        w : pysal.weights.W or list of pysal.weights.W
            spatial weights object(s) for the event-population pairs.
            If not provided, a weights object attached to the dataframe's
            metadata is used.
        s : string or list of strings
            name(s) of the column(s) to use as standard population for the
            events ``e`` and at-risk populations ``b``
        **kwargs : optional keyword arguments
            passed through to the smoother constructor

        Returns
        -------
        a new dataframe of smoothed columns, padded with None to a common
        length.
        """
        if s is None:
            raise Exception('Standard population variable "s" must be supplied.')
        import pandas as pd
        if isinstance(e, str):
            e = [e]
        if isinstance(b, str):
            b = [b]
        if isinstance(s, str):
            s = [s]
        if w is None:
            found = False
            for k in df._metadata:
                # BUG FIX: look up the metadata key ``k``; the original
                # queried ``w`` (None), so attached weights were never found.
                w = df.__dict__.get(k, None)
                if isinstance(w, W):
                    found = True
                    break
            if not found:
                raise Exception('Weights not provided and no weights attached to frame!'
                                ' Please provide a weight or attach a weight to the'
                                ' dataframe.')
        if isinstance(w, W):
            w = [w] * len(e)
        if not all(isinstance(wi, W) for wi in w):
            raise Exception('Weights object must be an instance of '
                            ' libpysal.weights.W!')
        b = b * len(e) if len(b) == 1 and len(e) > 1 else b
        s = s * len(e) if len(s) == 1 and len(e) > 1 else s
        try:
            assert len(e) == len(b)
            assert len(e) == len(s)
            assert len(e) == len(w)
        except AssertionError:
            raise ValueError('There is no one-to-one mapping between event'
                             ' variable and population at risk variable, and '
                             ' standard population variable, and spatial '
                             ' weights!')
        rdf = []
        max_len = 0
        for ei, bi, wi, si in zip(e, b, w, s):
            outcol = '_'.join(('-'.join((ei, bi)), cls.__name__.lower()))
            this_r = cls(df[ei], df[bi], w=wi, s=df[si], **kwargs).r
            # BUG FIX: track the longest result; the original reset max_len
            # to 0 exactly when a longer result appeared.
            max_len = max(max_len, len(this_r))
            rdf.append((outcol, this_r.tolist()))
        # BUG FIX: pad each column up to max_len; the original appended
        # max_len Nones to *every* column, producing unequal lengths.
        padded = ((name, vals + [None] * (max_len - len(vals)))
                  for name, vals in rdf)
        # pd.DataFrame.from_items was removed in pandas 1.0; a dict
        # preserves insertion order on Python 3.7+.
        return pd.DataFrame(dict(padded))
class Disk_Smoother(_Spatial_Smoother):
    """Locally weighted averages or disk smoothing

    Parameters
    ----------
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units
    w : spatial weights matrix

    Attributes
    ----------
    r : array (n, 1)
        rate values from disk smoothing

    Examples
    --------
    Reading data in stl_hom.csv into stl to extract values
    for event and population-at-risk variables

    >>> import libpysal
    >>> stl = libpysal.io.open(libpysal.examples.get_path('stl_hom.csv'), 'r')
    >>> stl_e, stl_b = np.array(stl[:,10]), np.array(stl[:,13])
    >>> stl_w = libpysal.io.open(libpysal.examples.get_path('stl.gal'), 'r').read()
    >>> if not stl_w.id_order_set: stl_w.id_order = range(1,len(stl) + 1)
    >>> sr = Disk_Smoother(stl_e,stl_b,stl_w)
    >>> sr.r[:10]
    array([[4.56502262e-05],
           [3.44027685e-05],
           [3.38280487e-05],
           [4.78530468e-05],
           [3.12278573e-05],
           [2.22596997e-05],
           [2.67074856e-05],
           [2.36924573e-05],
           [3.48801587e-05],
           [3.09511832e-05]])
    """

    def __init__(self, e, b, w):
        if not w.id_order_set:
            raise ValueError("w id_order must be set to align with the order of e and b")
        events = np.asarray(e).reshape(-1, 1)
        pop = np.asarray(b).reshape(-1, 1)
        rates = events * 1.0 / pop
        # total weight in each unit's neighborhood, for the weighted average
        totals = [sum(w.weights[idx]) for idx in w.id_order]
        self.r = slag(w, rates) / np.array(totals).reshape(-1, 1)
class Spatial_Median_Rate(_Spatial_Smoother):
"""Spatial Median Rate Smoothing
Parameters
----------
e : array (n, 1)
event variable measured across n spatial units
b : array (n, 1)
population at risk variable measured across n spatial units
w : spatial weights instance
aw : array (n, 1)
auxiliary weight variable measured across n spatial units
iteration : integer
the number of interations
Attributes
----------
r : array (n, 1)
rate values from spatial median rate smoothing
w : spatial weights instance
aw : array (n, 1)
auxiliary weight variable measured across n spatial units
Examples
--------
Reading data in stl_hom.csv into stl to extract values
for event and population-at-risk variables
>>> import libpysal
>>> stl = libpysal.io.open(libpysal.examples.get_path('stl_hom.csv'), 'r')
The 11th and 14th columns in stl_hom.csv includes the number of homocides and population.
Creating two arrays from these columns.
>>> stl_e, stl_b = np.array(stl[:,10]), np.array(stl[:,13])
Creating a spatial weights instance by reading in stl.gal file.
>>> stl_w = libpysal.io.open(libpysal.examples.get_path('stl.gal'), 'r').read()
Ensuring that the elements in the spatial weights instance are ordered
by the given sequential numbers from 1 to the number of observations in stl_hom.csv
>>> if not stl_w.id_order_set: stl_w.id_order = range(1,len(stl) + 1)
Computing spatial median rates without iteration
>>> smr0 = Spatial_Median_Rate(stl_e,stl_b,stl_w)
Extracting the computed rates through the property r of the Spatial_Median_Rate instance
>>> smr0.r[:10]
array([3.96047383e-05, 3.55386859e-05, 3.28308921e-05, 4.30731238e-05,
3.12453969e-05, 1.97300409e-05, 3.10159267e-05, 2.19279204e-05,
2.93763432e-05, 2.93763432e-05])
Recomputing spatial median rates with 5 iterations
>>> smr1 = Spatial_Median_Rate(stl_e,stl_b,stl_w,iteration=5)
Extracting the computed rates through the property r of the Spatial_Median_Rate instance
>>> smr1.r[:10]
array([3.11293620e-05, 2.95956330e-05, 3.11293620e-05, 3.10159267e-05,
2.98436066e-05, 2.76406686e-05, 3.10159267e-05, 2.94788171e-05,
2.99460806e-05, 2.96981070e-05])
Computing spatial median rates by using the base variable as auxilliary weights
without iteration
>>> smr2 = Spatial_Median_Rate(stl_e,stl_b,stl_w,aw=stl_b)
Extracting the computed rates through the property r of the Spatial_Median_Rate instance
>>> smr2.r[:10]
array([5.77412020e-05, 4.46449551e-05, 5.77412020e-05, 5.77412020e-05,
4.46449551e-05, 3.61363528e-05, 3.61363528e-05, 4.46449551e-05,
5.77412020e-05, 4.03987355e-05])
Recomputing spatial median rates by using the base variable as auxilliary weights
with 5 iterations
>>> smr3 = Spatial_Median_Rate(stl_e,stl_b,stl_w,aw=stl_b,iteration=5)
Extracting the computed rates through the property r of the Spatial_Median_Rate instance
>>> smr3.r[:10]
array([3.61363528e-05, 4.46449551e-05, 3.61363528e-05, 3.61363528e-05,
4.46449551e-05, 3.61363528e-05, 3.61363528e-05, 4.46449551e-05,
3.61363528e-05, 4.46449551e-05])
>>>
"""
    def __init__(self, e, b, w, aw=None, iteration=1):
        """Build spatially smoothed rates via iterated local medians.

        Parameters
        ----------
        e : array-like (n, 1)
            event counts per spatial unit
        b : array-like (n, 1)
            population at risk per spatial unit
        w : spatial weights instance
            must have ``id_order`` set so rows of e/b align with weights
        aw : array-like (n, 1), optional
            auxiliary weights; when given, a weighted median is used
        iteration : int
            number of median-smoothing passes to apply

        Raises
        ------
        ValueError
            if ``w.id_order_set`` is False (alignment cannot be guaranteed)
        """
        if not w.id_order_set:
            raise ValueError("w id_order must be set to align with the order of e and b")
        # flatten so elementwise division yields a 1-d crude rate vector
        e = np.asarray(e).flatten()
        b = np.asarray(b).flatten()
        self.r = e * 1.0 / b
        self.aw, self.w = aw, w
        # each pass replaces every rate with the median over its neighborhood
        while iteration:
            self.__search_median()
            iteration -= 1
    def __search_median(self):
        """One smoothing pass: replace each unit's rate by the (weighted)
        median over the unit itself plus its neighbors.

        Reads ``self.r``/``self.aw``/``self.w`` and rebinds ``self.r`` to the
        smoothed array (same shape as before).
        """
        r, aw, w = self.r, self.aw, self.w
        new_r = []
        if self.aw is None:
            # unweighted: plain median of the rate at i and its neighbors
            for i, id in enumerate(w.id_order):
                r_disk = np.append(r[i], r[w.neighbor_offsets[id]])
                new_r.append(np.median(r_disk))
        else:
            # weighted: include i itself, then take the aw-weighted median
            for i, id in enumerate(w.id_order):
                id_d = [i] + list(w.neighbor_offsets[id])
                aw_d, r_d = aw[id_d], r[id_d]
                new_r.append(weighted_median(r_d, aw_d))
        self.r = np.asarray(new_r).reshape(r.shape)
class Spatial_Filtering(_Smoother):
    """Spatial Filtering

    Parameters
    ----------
    bbox : a list of two lists where each list is a pair of coordinates
        a bounding box for the entire n spatial units
    data : array (n, 2)
        x, y coordinates
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units
    x_grid : integer
        the number of cells on x axis
    y_grid : integer
        the number of cells on y axis
    r : float
        fixed radius of a moving window
    pop : integer
        population threshold to create adaptive moving windows

    Attributes
    ----------
    grid : array (x_grid*y_grid, 2)
        x, y coordinates for grid points
    r : array (x_grid*y_grid, 1)
        rate values for grid points

    Notes
    -----
    No tool is provided to find an optimal value for r or pop.

    Examples
    --------
    Reading data in stl_hom.csv into stl to extract values
    for event and population-at-risk variables

    >>> import libpysal
    >>> stl = libpysal.io.open(libpysal.examples.get_path('stl_hom.csv'), 'r')

    Reading the stl data in the WKT format so that
    we can easily extract polygon centroids

    >>> from libpysal.io.util.wkt import WKTParser
    >>> fromWKT = WKTParser()
    >>> stl.cast('WKT',fromWKT)

    Extracting polygon centroids through iteration

    >>> d = np.array([i.centroid for i in stl[:,0]])

    Specifying the bounding box for the stl_hom data.
    The bbox should includes two points for the left-bottom and the right-top corners

    >>> bbox = [[-92.700676, 36.881809], [-87.916573, 40.3295669]]

    The 11th and 14th columns in stl_hom.csv includes the number of homocides and population.
    Creating two arrays from these columns.

    >>> stl_e, stl_b = np.array(stl[:,10]), np.array(stl[:,13])

    Applying spatial filtering by using a 10*10 mesh grid and a moving window
    with 2 radius

    >>> sf_0 = Spatial_Filtering(bbox,d,stl_e,stl_b,10,10,r=2)

    Extracting the resulting rates through the property r of the Spatial_Filtering instance

    >>> sf_0.r[:10]
    array([4.23561763e-05, 4.45290850e-05, 4.56456221e-05, 4.49133384e-05,
           4.39671835e-05, 4.44903042e-05, 4.19845497e-05, 4.11936548e-05,
           3.93463504e-05, 4.04376345e-05])

    Applying another spatial filtering by allowing the moving window to grow until
    600000 people are found in the window

    >>> sf = Spatial_Filtering(bbox,d,stl_e,stl_b,10,10,pop=600000)

    Checking the size of the reulting array including the rates

    >>> sf.r.shape
    (100,)

    Extracting the resulting rates through the property r of the Spatial_Filtering instance

    >>> sf.r[:10]
    array([3.73728738e-05, 4.04456300e-05, 4.04456300e-05, 3.81035327e-05,
           4.54831940e-05, 4.54831940e-05, 3.75658628e-05, 3.75658628e-05,
           3.75658628e-05, 3.75658628e-05])
    """

    def __init__(self, bbox, data, e, b, x_grid, y_grid, r=None, pop=None):
        e = np.asarray(e).reshape(-1, 1)
        b = np.asarray(b).reshape(-1, 1)
        data_tree = KDTree(data)
        x_range = bbox[1][0] - bbox[0][0]
        y_range = bbox[1][1] - bbox[0][1]
        # regular mesh of x_grid * y_grid evaluation points over the bbox
        x, y = np.mgrid[bbox[0][0]:bbox[1][0]:float(x_range) / x_grid,
                        bbox[0][1]:bbox[1][1]:float(y_range) / y_grid]
        self.grid = list(zip(x.ravel(), y.ravel()))
        self.r = []
        if r is None and pop is None:
            raise ValueError("Either r or pop should not be None")
        if r is not None:
            # fixed-radius window: pool events/population within each disk
            pnts_in_disk = data_tree.query_ball_point(self.grid, r=r)
            for ids in pnts_in_disk:
                # renamed from ``r`` so the radius parameter is not shadowed
                rate = e[ids].sum() * 1.0 / b[ids].sum()
                self.r.append(rate)
        if pop is not None:
            # adaptive window: take nearest points in order until cumulative
            # population would exceed the ``pop`` threshold
            half_nearest_pnts = data_tree.query(self.grid, k=len(e))[1]
            for i in half_nearest_pnts:
                e_n, b_n = e[i].cumsum(), b[i].cumsum()
                b_n_filter = b_n <= pop
                e_n_f, b_n_f = e_n[b_n_filter], b_n[b_n_filter]
                if len(e_n_f) == 0:
                    # even the single nearest point exceeds the threshold;
                    # fall back to that one point
                    e_n_f = e_n[[0]]
                    b_n_f = b_n[[0]]
                self.r.append(e_n_f[-1] * 1.0 / b_n_f[-1])
        self.r = np.array(self.r)

    @_requires('pandas')
    @classmethod
    def by_col(cls, df, e, b, x_grid, y_grid, geom_col='geometry', **kwargs):
        """
        Compute smoothing by columns in a dataframe. The bounding box and point
        information is computed from the geometry column.

        Parameters
        -----------
        df : pandas.DataFrame
            a dataframe containing the data to be smoothed
        e : string or list of strings
            the name or names of columns containing event variables to be
            smoothed
        b : string or list of strings
            the name or names of columns containing the population
            variables to be smoothed
        x_grid : integer
            number of grid cells to use along the x-axis
        y_grid : integer
            number of grid cells to use along the y-axis
        geom_col: string
            the name of the column in the dataframe containing the
            geometry information.
        **kwargs: optional keyword arguments
            optional keyword options that are passed directly to the
            smoother.

        Returns
        ---------
        a new dataframe of dimension (x_grid*y_grid, 3), containing the
        coordinates of the grid cells and the rates associated with those grid
        cells.
        """
        import pandas as pd
        # prep for application over multiple event/population pairs
        if isinstance(e, str):
            e = [e]
        if isinstance(b, str):
            b = [b]
        if len(e) > len(b):
            b = b * len(e)
        if isinstance(x_grid, (int, float)):
            x_grid = [x_grid] * len(e)
        if isinstance(y_grid, (int, float)):
            y_grid = [y_grid] * len(e)
        bbox = get_bounding_box(df[geom_col])
        bbox = [[bbox.left, bbox.lower], [bbox.right, bbox.upper]]
        data = get_points_array(df[geom_col])
        res = []
        for ename, bname, xgi, ygi in zip(e, b, x_grid, y_grid):
            r = cls(bbox, data, df[ename], df[bname], xgi, ygi, **kwargs)
            grid = np.asarray(r.grid).reshape(-1, 2)
            name = '_'.join(('-'.join((ename, bname)), cls.__name__.lower()))
            colnames = ('_'.join((name, suffix)) for suffix in ['X', 'Y', 'R'])
            items = [(name, col) for name, col in zip(colnames, [grid[:, 0],
                                                                 grid[:, 1],
                                                                 r.r])]
            # BUG FIX: DataFrame.from_items was deprecated in pandas 0.23 and
            # removed in 1.0.  A dict preserves insertion order (Python 3.7+),
            # so column order is unchanged.
            res.append(pd.DataFrame(dict(items)))
        outdf = pd.concat(res)
        return outdf
class Headbanging_Triples(object):
    """Generate a pseudo spatial weights instance that contains headbanging triples

    Parameters
    ----------
    data : array (n, 2)
        numpy array of x, y coordinates
    w : spatial weights instance
    k : integer number of nearest neighbors
    t : integer
        the number of triples
    angle : integer between 0 and 180
        the angle criterium for a set of triples
    edgecorr : boolean
        whether or not correction for edge points is made

    Attributes
    ----------
    triples : dictionary
        key is observation record id, value is a list of lists of triple ids
    extra : dictionary
        key is observation record id, value is a list of the following:
        tuple of original triple observations
        distance between original triple observations
        distance between an original triple observation and its extrapolated point

    Examples
    --------
    importing k-nearest neighbor weights creator

    >>> import libpysal # doctest: +SKIP

    Reading data in stl_hom.csv into stl_db to extract values
    for event and population-at-risk variables

    >>> stl_db = libpysal.io.open(libpysal.examples.get_path('stl_hom.csv'),'r') # doctest: +SKIP

    Reading the stl data in the WKT format so that
    we can easily extract polygon centroids

    >>> from libpysal.io.util.wkt import WKTParser # doctest: +SKIP
    >>> fromWKT = WKTParser() # doctest: +SKIP
    >>> stl_db.cast('WKT',fromWKT) # doctest: +SKIP

    Extracting polygon centroids through iteration

    >>> d = np.array([i.centroid for i in stl_db[:,0]]) # doctest: +SKIP

    Using the centroids, we create a 5-nearst neighbor weights

    >>> w = libpysal.weights.KNN(d,k=5) # doctest: +SKIP

    Ensuring that the elements in the spatial weights instance are ordered
    by the order of stl_db's IDs

    >>> if not w.id_order_set: w.id_order = w.id_order # doctest: +SKIP

    Finding headbaning triples by using 5 nearest neighbors

    >>> ht = Headbanging_Triples(d,w,k=5) # doctest: +SKIP

    Checking the members of triples

    >>> for k, item in ht.triples.items()[:5]: print(k, item) # doctest: +SKIP
    0 [(5, 6), (10, 6)]
    1 [(4, 7), (4, 14), (9, 7)]
    2 [(0, 8), (10, 3), (0, 6)]
    3 [(4, 2), (2, 12), (8, 4)]
    4 [(8, 1), (12, 1), (8, 9)]

    Opening sids2.shp file

    >>> import libpysal
    >>> sids = libpysal.io.open(libpysal.examples.get_path('sids2.shp'),'r') # doctest: +SKIP

    Extracting the centroids of polygons in the sids data

    >>> sids_d = np.array([i.centroid for i in sids]) # doctest: +SKIP

    Creating a 5-nearest neighbors weights from the sids centroids

    >>> sids_w = libpysal.weights.KNN(sids_d,k=5) # doctest: +SKIP

    Ensuring that the members in sids_w are ordered by
    the order of sids_d's ID

    >>> if not sids_w.id_order_set: sids_w.id_order = sids_w.id_order # doctest: +SKIP

    Finding headbaning triples by using 5 nearest neighbors

    >>> s_ht = Headbanging_Triples(sids_d,sids_w,k=5) # doctest: +SKIP

    Checking the members of the found triples

    >>> for k, item in s_ht.triples.items()[:5]: print(k, item) # doctest: +SKIP
    0 [(1, 18), (1, 21), (1, 33)]
    1 [(2, 40), (2, 22), (22, 40)]
    2 [(39, 22), (1, 9), (39, 17)]
    3 [(16, 6), (19, 6), (20, 6)]
    4 [(5, 15), (27, 15), (35, 15)]

    Finding headbanging triples by using 5 nearest neighbors with edge correction

    >>> s_ht2 = Headbanging_Triples(sids_d,sids_w,k=5,edgecor=True) # doctest: +SKIP

    Checking the members of the found triples

    >>> for k, item in s_ht2.triples.items()[:5]: print(k, item) # doctest: +SKIP
    0 [(1, 18), (1, 21), (1, 33)]
    1 [(2, 40), (2, 22), (22, 40)]
    2 [(39, 22), (1, 9), (39, 17)]
    3 [(16, 6), (19, 6), (20, 6)]
    4 [(5, 15), (27, 15), (35, 15)]

    Checking the extrapolated point that is introduced into the triples
    during edge correction

    >>> extrapolated = s_ht2.extra[72] # doctest: +SKIP

    Checking the observation IDs constituting the extrapolated triple

    >>> extrapolated[0] # doctest: +SKIP
    (89, 77)

    Checking the distances between the extraploated point and the observation 89 and 77

    >>> round(extrapolated[1],5), round(extrapolated[2],6) # doctest: +SKIP
    (0.33753, 0.302707)
    """

    def __init__(self, data, w, k=5, t=3, angle=135.0, edgecor=False):
        # NOTE(review): this class is hard-deprecated -- the unconditional
        # raise below makes every statement after it unreachable, so
        # instances can never be constructed.  The remaining body is kept
        # for reference only.
        raise DeprecationWarning('Deprecated')
        if k < 3:
            raise ValueError("w should be NeareastNeighbors instance & the number of neighbors should be more than 3.")
        if not w.id_order_set:
            raise ValueError("w id_order must be set to align with the order of data")
        self.triples, points = {}, {}
        # map (index, Point) -> {neighbor index: neighbor Point}
        for i, pnt in enumerate(data):
            ng = w.neighbor_offsets[i]
            points[(i, Point(pnt))] = dict(list(zip(ng, [Point(d)
                                                         for d in data[ng]])))
        for i, pnt in list(points.keys()):
            ng = points[(i, pnt)]
            tr, tr_dis = {}, []
            # every neighbor pair whose subtended angle at pnt passes the
            # ``angle`` criterion is a candidate triple
            for c in comb(list(ng.keys()), 2):
                p2, p3 = ng[c[0]], ng[c[-1]]
                ang = get_angle_between(Ray(pnt, p2), Ray(pnt, p3))
                if ang > angle or (ang < 0.0 and ang + 360 > angle):
                    tr[tuple(c)] = (p2, p3)
            if len(tr) > t:
                # keep only the t triples whose connecting segment lies
                # closest to pnt
                for c in list(tr.keys()):
                    p2, p3 = tr[c]
                    tr_dis.append((get_segment_point_dist(
                        LineSegment(p2, p3), pnt), c))
                tr_dis = sorted(tr_dis)[:t]
                self.triples[i] = [trp for dis, trp in tr_dis]
            else:
                self.triples[i] = list(tr.keys())
        if edgecor:
            # edge correction: for hull points with no triples, synthesize an
            # extrapolated third point from two neighbors
            self.extra = {}
            ps = dict([(p, i) for i, p in list(points.keys())])
            chull = convex_hull(list(ps.keys()))
            chull = [p for p in chull if len(self.triples[ps[p]]) == 0]
            for point in chull:
                key = (ps[point], point)
                ng = points[key]
                ng_dist = [(get_points_dist(point, p), p) for p in list(ng.values())]
                # farthest first so pop() tries the closest neighbor first
                ng_dist_s = sorted(ng_dist, reverse=True)
                extra = None
                while extra is None and len(ng_dist_s) > 0:
                    p2 = ng_dist_s.pop()[-1]
                    p3s = list(ng.values())
                    p3s.remove(p2)
                    for p3 in p3s:
                        dist_p2_p3 = get_points_dist(p2, p3)
                        dist_p_p2 = get_points_dist(point, p2)
                        dist_p_p3 = get_points_dist(point, p3)
                        # anchor the rays at whichever of p2/p3 is closer
                        if dist_p_p2 <= dist_p_p3:
                            ray1, ray2, s_pnt, dist, c = Ray(p2, point), Ray(p2, p3), p2, dist_p_p2, (ps[p2], ps[p3])
                        else:
                            ray1, ray2, s_pnt, dist, c = Ray(p3, point), Ray(p3, p2), p3, dist_p_p3, (ps[p3], ps[p2])
                        ang = get_angle_between(ray1, ray2)
                        if ang >= 90 + angle / 2 or (ang < 0 and ang + 360 >= 90 + angle / 2):
                            ex_point = get_point_at_angle_and_dist(
                                ray1, angle, dist)
                            extra = [c, dist_p2_p3, get_points_dist(
                                s_pnt, ex_point)]
                            break
                self.triples[ps[point]].append(extra[0])
                self.extra[ps[point]] = extra
class Headbanging_Median_Rate(object):
    """Headbanging Median Rate Smoothing

    Parameters
    ----------
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units
    t : Headbanging_Triples instance
    aw : array (n, 1)
        auxilliary weight variable measured across n spatial units
    iteration : integer
        the number of iterations

    Attributes
    ----------
    r : array (n, 1)
        rate values from headbanging median smoothing

    Examples
    --------
    >>> import libpysal # doctest: +SKIP

    opening the sids2 shapefile

    >>> sids = libpysal.io.open(libpysal.examples.get_path('sids2.shp'), 'r') # doctest: +SKIP

    extracting the centroids of polygons in the sids2 data

    >>> sids_d = np.array([i.centroid for i in sids]) # doctest: +SKIP

    creating a 5-nearest neighbors weights from the centroids

    >>> sids_w = libpysal.weights.KNN(sids_d,k=5) # doctest: +SKIP

    ensuring that the members in sids_w are ordered

    >>> if not sids_w.id_order_set: sids_w.id_order = sids_w.id_order # doctest: +SKIP

    finding headbanging triples by using 5 neighbors

    >>> s_ht = Headbanging_Triples(sids_d,sids_w,k=5) # doctest: +SKIP

    reading in the sids2 data table

    >>> sids_db = libpysal.io.open(libpysal.examples.get_path('sids2.dbf'), 'r') # doctest: +SKIP

    extracting the 10th and 9th columns in the sids2.dbf and
    using data values as event and population-at-risk variables

    >>> s_e, s_b = np.array(sids_db[:,9]), np.array(sids_db[:,8]) # doctest: +SKIP

    computing headbanging median rates from s_e, s_b, and s_ht

    >>> sids_hb_r = Headbanging_Median_Rate(s_e,s_b,s_ht) # doctest: +SKIP

    extracting the computed rates through the property r of the Headbanging_Median_Rate instance

    >>> sids_hb_r.r[:5] # doctest: +SKIP
    array([ 0.00075586,  0.        ,  0.0008285 ,  0.0018315 ,  0.00498891])

    recomputing headbanging median rates with 5 iterations

    >>> sids_hb_r2 = Headbanging_Median_Rate(s_e,s_b,s_ht,iteration=5) # doctest: +SKIP

    extracting the computed rates through the property r of the Headbanging_Median_Rate instance

    >>> sids_hb_r2.r[:5] # doctest: +SKIP
    array([ 0.0008285 ,  0.00084331,  0.00086896,  0.0018315 ,  0.00498891])

    recomputing headbanging median rates by considring a set of auxilliary weights

    >>> sids_hb_r3 = Headbanging_Median_Rate(s_e,s_b,s_ht,aw=s_b) # doctest: +SKIP

    extracting the computed rates through the property r of the Headbanging_Median_Rate instance

    >>> sids_hb_r3.r[:5] # doctest: +SKIP
    array([ 0.00091659,  0.        ,  0.00156838,  0.0018315 ,  0.00498891])
    """

    def __init__(self, e, b, t, aw=None, iteration=1):
        # NOTE(review): the class is hard-deprecated -- this unconditional
        # raise makes the rest of __init__ unreachable, so instances can
        # never be constructed.
        raise DeprecationWarning('Deprecated')
        self.r = e * 1.0 / b
        self.tr, self.aw = t.triples, aw
        if hasattr(t, 'extra'):
            self.extra = t.extra
        while iteration:
            self.__search_headbanging_median()
            iteration -= 1

    def __get_screens(self, id, triples, weighted=False):
        """Return the screening tuple for one observation: its own rate plus
        the (possibly weighted) medians of the lowest and highest rates over
        its triples."""
        r, tr = self.r, self.tr
        if len(triples) == 0:
            return r[id]
        if hasattr(self, 'extra') and id in self.extra:
            # edge-corrected point: the third rate is extrapolated
            extra = self.extra
            trp_r = r[list(triples[0])]
            # observed rate
            # plus difference in rate scaled by ratio of extrapolated distance
            # & observed distance.
            trp_r[-1] = trp_r[0] + (trp_r[0] - trp_r[-1]) * (
                extra[id][-1] * 1.0 / extra[id][1])
            trp_r = sorted(trp_r)
            if not weighted:
                return r[id], trp_r[0], trp_r[-1]
            else:
                trp_aw = self.aw[triples[0]]
                extra_w = trp_aw[0] + (trp_aw[0] - trp_aw[-
                                                          1]) * (extra[id][-1] * 1.0 / extra[id][1])
                return r[id], trp_r[0], trp_r[-1], self.aw[id], trp_aw[0] + extra_w
        if not weighted:
            lowest, highest = [], []
            for trp in triples:
                trp_r = np.sort(r[list(trp)])
                lowest.append(trp_r[0])
                highest.append(trp_r[-1])
            return r[id], np.median(np.array(lowest)), np.median(np.array(highest))
        if weighted:
            lowest, highest = [], []
            lowest_aw, highest_aw = [], []
            for trp in triples:
                trp_r = r[list(trp)]
                dtype = [('r', '%s' % trp_r.dtype), ('w',
                                                     '%s' % self.aw.dtype)]
                trp_r = np.array(list(zip(trp_r, list(trp))), dtype=dtype)
                trp_r.sort(order='r')
                lowest.append(trp_r['r'][0])
                highest.append(trp_r['r'][-1])
                lowest_aw.append(self.aw[int(round(trp_r['w'][0]))])
                highest_aw.append(self.aw[int(round(trp_r['w'][-1]))])
            wm_lowest = weighted_median(np.array(lowest), np.array(lowest_aw))
            wm_highest = weighted_median(
                np.array(highest), np.array(highest_aw))
            triple_members = flatten(triples, unique=False)
            return r[id], wm_lowest, wm_highest, self.aw[id] * len(triples), self.aw[triple_members].sum()

    def __get_median_from_screens(self, screens):
        """Clamp an observation's rate between the low/high screens; a bare
        float means the observation had no triples and is passed through."""
        if isinstance(screens, float):
            return screens
        elif len(screens) == 3:
            return np.median(np.array(screens))
        elif len(screens) == 5:
            rk, wm_lowest, wm_highest, w1, w2 = screens
            if rk >= wm_lowest and rk <= wm_highest:
                return rk
            elif rk < wm_lowest and w1 < w2:
                return wm_lowest
            elif rk > wm_highest and w1 < w2:
                return wm_highest
            else:
                return rk

    def __search_headbanging_median(self):
        """One smoothing pass over all observations."""
        r, tr = self.r, self.tr
        new_r = []
        for k in list(tr.keys()):
            screens = self.__get_screens(
                k, tr[k], weighted=(self.aw is not None))
            new_r.append(self.__get_median_from_screens(screens))
        self.r = np.array(new_r)

    @_requires('pandas')
    @classmethod
    def by_col(cls, df, e, b, t=None, geom_col='geometry', inplace=False, **kwargs):
        """
        Compute smoothing by columns in a dataframe. The bounding box and point
        information is computed from the geometry column.

        Parameters
        -----------
        df : pandas.DataFrame
            a dataframe containing the data to be smoothed
        e : string or list of strings
            the name or names of columns containing event variables to be
            smoothed
        b : string or list of strings
            the name or names of columns containing the population
            variables to be smoothed
        t : Headbanging_Triples instance or list of Headbanging_Triples
            list of headbanging triples instances. If not provided, this
            is computed from the geometry column of the dataframe.
        geom_col: string
            the name of the column in the dataframe containing the
            geometry information.
        inplace : bool
            a flag denoting whether to output a copy of `df` with the
            relevant smoothed columns appended, or to append the columns
            directly to `df` itself.
        **kwargs: optional keyword arguments
            optional keyword options that are passed directly to the
            smoother.

        Returns
        ---------
        a new dataframe containing the smoothed Headbanging Median Rates for the
        event/population pairs. If done inplace, there is no return value and
        `df` is modified in place.
        """
        import pandas as pd
        if not inplace:
            new = df.copy()
            cls.by_col(new, e, b, t=t, geom_col=geom_col, inplace=True, **kwargs)
            return new
        # prep for application over multiple event/population pairs
        if isinstance(e, str):
            e = [e]
        if isinstance(b, str):
            b = [b]
        if len(e) > len(b):
            b = b * len(e)
        data = get_points_array(df[geom_col])
        # Headbanging_Triples doesn't take **kwargs, so filter its arguments
        # (self, data, w, k=5, t=3, angle=135.0, edgecor=False):
        w = kwargs.pop('w', None)
        if w is None:
            found = False
            for k in df._metadata:
                # BUG FIX: this previously read ``df.__dict__.get(w, None)``,
                # which always looked up the key None (and a found weight was
                # overwritten on the next iteration).  Look up the metadata
                # attribute name and keep the first weights object found.
                w = df.__dict__.get(k, None)
                if isinstance(w, W):
                    found = True
                    break
            if not found:
                raise Exception('Weights not provided and no weights attached to frame!'
                                ' Please provide a weight or attach a weight to the'
                                ' dataframe')
        k = kwargs.pop('k', 5)
        t = kwargs.pop('t', 3)
        angle = kwargs.pop('angle', 135.0)
        edgecor = kwargs.pop('edgecor', False)
        hbt = Headbanging_Triples(data, w, k=k, t=t, angle=angle,
                                  edgecor=edgecor)
        res = []
        for ename, bname in zip(e, b):
            r = cls(df[ename], df[bname], hbt, **kwargs).r
            name = '_'.join(('-'.join((ename, bname)), cls.__name__.lower()))
            df[name] = r
|
def crude_age_standardization(e, b, n):
    """A utility function to compute rate through crude age standardization

    Parameters
    ----------
    e : array
        (n*h, 1), event variable measured for each age group across n spatial units
    b : array
        (n*h, 1), population at risk variable measured for each age group across n spatial units
    n : integer
        the number of spatial units

    Notes
    -----
    e and b are arranged in the same order

    Returns
    -------
    : array
        (n, 1), age standardized rate

    Examples
    --------
    Creating an array of an event variable (e.g., the number of cancer patients)
    for 2 regions in each of which 4 age groups are available.
    The first 4 values are event values for 4 age groups in the region 1,
    and the next 4 values are for 4 age groups in the region 2.

    >>> e = np.array([30, 25, 25, 15, 33, 21, 30, 20])

    Creating another array of a population-at-risk variable (e.g., total population)
    for the same two regions.
    The order for entering values is the same as the case of e.

    >>> b = np.array([100, 100, 110, 90, 100, 90, 110, 90])

    Specifying the number of regions.

    >>> n = 2

    Applying crude_age_standardization function to e and b

    >>> crude_age_standardization(e, b, n)
    array([0.2375    , 0.26666667])
    """
    # age-specific rates, weighted by each group's share of its region total
    r = e * 1.0 / b
    b_by_n = sum_by_n(b, 1.0, n)
    age_weight = b * 1.0 / b_by_n.repeat(len(e) // n)
    return sum_by_n(r, age_weight, n)
"def sum_by_n(d, w, n):\n \"\"\"A utility function to summarize a data array into n values\n after weighting the array with another weight array w\n\n Parameters\n ----------\n d : array\n (t, 1), numerical values\n w : array\n (t, 1), numerical... | from __future__ import division
"""
Apply smoothing to rate computation
[Longer Description]
Author(s):
Myunghwa Hwang mhwang4@gmail.com
David Folch dfolch@asu.edu
Luc Anselin luc.anselin@asu.edu
Serge Rey srey@asu.edu
"""
__author__ = "Myunghwa Hwang <mhwang4@gmail.com>, David Folch <dfolch@asu.edu>, Luc Anselin <luc.anselin@asu.edu>, Serge Rey <srey@asu.edu"
from libpysal.weights.weights import W
from libpysal.weights.distance import Kernel
from libpysal.weights.util import get_points_array, comb
from libpysal.cg import Point, Ray, LineSegment
from libpysal.cg import get_angle_between, get_points_dist, get_segment_point_dist,\
get_point_at_angle_and_dist, convex_hull, get_bounding_box
from libpysal.common import np, KDTree, requires as _requires
from libpysal.weights.spatial_lag import lag_spatial as slag
from scipy.stats import gamma, norm, chi2, poisson
from functools import reduce
import doctest
__all__ = ['Excess_Risk', 'Empirical_Bayes', 'Spatial_Empirical_Bayes', 'Spatial_Rate', 'Kernel_Smoother', 'Age_Adjusted_Smoother', 'Disk_Smoother', 'Spatial_Median_Rate', 'Spatial_Filtering', 'Headbanging_Triples', 'Headbanging_Median_Rate', 'flatten', 'weighted_median', 'sum_by_n', 'crude_age_standardization', 'direct_age_standardization', 'indirect_age_standardization', 'standardized_mortality_ratio', 'choynowski', 'assuncao_rate']
def flatten(l, unique=True):
    """flatten a list of lists

    Parameters
    ----------
    l : list
        of lists (or of other concatenable sequences, e.g. tuples)
    unique : boolean
        whether or not only unique items are wanted (default=True)

    Returns
    -------
    list
        of single items

    Examples
    --------
    Creating a sample list whose elements are lists of integers

    >>> l = [[1, 2], [3, 4, ], [5, 6]]

    Applying flatten function

    >>> flatten(l)
    [1, 2, 3, 4, 5, 6]
    """
    # BUG FIX: reduce() with no initializer raises TypeError on an empty
    # sequence; an empty input now simply yields an empty list.
    if not l:
        return []
    l = reduce(lambda x, y: x + y, l)
    if not unique:
        return list(l)
    return list(set(l))
def weighted_median(d, w):
    """A utility function to find a median of d based on w

    Parameters
    ----------
    d : array
        (n, 1), variable for which median will be found
    w : array
        (n, 1), variable on which d's median will be decided

    Notes
    -----
    d and w are arranged in the same order

    Returns
    -------
    float
        median of d

    Examples
    --------
    Creating an array including five integers.
    We will get the median of these integers.

    >>> d = np.array([5,4,3,1,2])

    Creating another array including weight values for the above integers.
    The median of d will be decided with a consideration to these weight
    values.

    >>> w = np.array([10, 22, 9, 2, 5])

    Applying weighted_median function

    >>> weighted_median(d, w)
    4
    """
    # Pair each weight with its value and sort by value ('v'); the weight
    # field breaks ties, matching numpy's structured-sort semantics.
    rec = np.array(list(zip(w, d)),
                   dtype=[('w', '%s' % w.dtype), ('v', '%s' % d.dtype)])
    rec.sort(order='v')
    cum_w = rec['w'].cumsum()
    half = cum_w[-1] * 1.0 / 2
    # first position where the accumulated weight reaches half the total mass
    idx = int(np.argmax(cum_w >= half))
    ordered = np.sort(d)
    if cum_w[idx] == half and idx < len(d) - 1:
        # the split falls exactly between two values: average them
        return ordered[idx:idx + 2].mean()
    return ordered[idx]
def sum_by_n(d, w, n):
    """A utility function to summarize a data array into n values
    after weighting the array with another weight array w

    Parameters
    ----------
    d : array
        (t, 1), numerical values
    w : array
        (t, 1), numerical values for weighting
    n : integer
        the number of groups
        t = c*n (c is a constant)

    Returns
    -------
    : array
        (n, 1), an array with summarized values

    Examples
    --------
    Creating an array including four integers.
    We will compute weighted means for every two elements.

    >>> d = np.array([10, 9, 20, 30])

    Here is another array with the weight values for d's elements.

    >>> w = np.array([0.5, 0.1, 0.3, 0.8])

    We specify the number of groups for which the weighted mean is computed.

    >>> n = 2

    Applying sum_by_n function

    >>> sum_by_n(d, w, n)
    array([ 5.9, 30. ])
    """
    total = len(d)
    size = total // n  # group length; caller guarantees total == c * n
    weighted = np.asarray(d) * w
    # reduceat sums each contiguous segment [start, next_start); the segment
    # boundaries are identical to slicing d[i:i + size] for i in range(0,
    # total, size), so grouping matches the original loop exactly.
    starts = np.arange(0, total, size)
    return np.add.reduceat(weighted, starts)
def direct_age_standardization(e, b, s, n, alpha=0.05):
    """A utility function to compute rate through direct age standardization

    Parameters
    ----------
    e : array
        (n*h, 1), event variable measured for each age group across n spatial units
    b : array
        (n*h, 1), population at risk variable measured for each age group across n spatial units
    s : array
        (n*h, 1), standard population for each age group across n spatial units
    n : integer
        the number of spatial units
    alpha : float
        significance level for confidence interval

    Notes
    -----
    e, b, and s are arranged in the same order

    Returns
    -------
    list
        a list of n tuples; a tuple has a rate and its lower and upper limits
        age standardized rates and confidence intervals

    Examples
    --------
    Creating an array of an event variable (e.g., the number of cancer patients)
    for 2 regions in each of which 4 age groups are available.
    The first 4 values are event values for 4 age groups in the region 1,
    and the next 4 values are for 4 age groups in the region 2.

    >>> e = np.array([30, 25, 25, 15, 33, 21, 30, 20])

    Creating another array of a population-at-risk variable (e.g., total population)
    for the same two regions.
    The order for entering values is the same as the case of e.

    >>> b = np.array([1000, 1000, 1100, 900, 1000, 900, 1100, 900])

    For direct age standardization, we also need the data for standard population.
    Standard population is a reference population-at-risk (e.g., population distribution for the U.S.)
    whose age distribution can be used as a benchmarking point for comparing age distributions
    across regions (e.g., population distribution for Arizona and California).
    Another array including standard population is created.

    >>> s = np.array([1000, 900, 1000, 900, 1000, 900, 1000, 900])

    Specifying the number of regions.

    >>> n = 2

    Applying direct_age_standardization function to e and b

    >>> a, b = [i[0] for i in direct_age_standardization(e, b, s, n)]
    >>> round(a, 4)
    0.0237
    >>> round(b, 4)
    0.0267
    """
    # each region's age-specific rates weighted by the standard population's
    # age shares
    age_weight = (1.0 / b) * (s * 1.0 / sum_by_n(s, 1.0, n).repeat(len(s) // n))
    adjusted_r = sum_by_n(e, age_weight, n)
    var_estimate = sum_by_n(e, np.square(age_weight), n)
    # gamma-distribution CI parameters (Fay & Feuer style)
    g_a = np.square(adjusted_r) / var_estimate
    g_b = var_estimate / adjusted_r
    k = [age_weight[i:i + len(b) // n].max() for i in range(0, len(b),
                                                           len(b) // n)]
    g_a_k = np.square(adjusted_r + k) / (var_estimate + np.square(k))
    g_b_k = (var_estimate + np.square(k)) / (adjusted_r + k)
    res = []
    for i in range(len(adjusted_r)):
        if adjusted_r[i] == 0:
            # Exact Poisson upper bound for a zero count: half a chi-square
            # quantile with 2 degrees of freedom.
            # BUG FIX: chi2.ppf requires a df argument; it was missing, so
            # this branch raised TypeError whenever a rate was exactly 0.
            upper = 0.5 * chi2.ppf(1 - 0.5 * alpha, 2)
            lower = 0.0
        else:
            lower = gamma.ppf(0.5 * alpha, g_a[i], scale=g_b[i])
            upper = gamma.ppf(1 - 0.5 * alpha, g_a_k[i], scale=g_b_k[i])
        res.append((adjusted_r[i], lower, upper))
    return res
def indirect_age_standardization(e, b, s_e, s_b, n, alpha=0.05):
    """A utility function to compute rate through indirect age standardization

    Parameters
    ----------
    e : array
        (n*h, 1), event variable measured for each age group across n spatial units
    b : array
        (n*h, 1), population at risk variable measured for each age group across n spatial units
    s_e : array
        (n*h, 1), event variable measured for each age group across n spatial units in a standard population
    s_b : array
        (n*h, 1), population variable measured for each age group across n spatial units in a standard population
    n : integer
        the number of spatial units
    alpha : float
        significance level for confidence interval

    Notes
    -----
    e, b, s_e, and s_b are arranged in the same order

    Returns
    -------
    list
        a list of n tuples; a tuple has a rate and its lower and upper limits
        age standardized rate

    Examples
    --------
    Creating an array of an event variable (e.g., the number of cancer patients)
    for 2 regions in each of which 4 age groups are available.
    The first 4 values are event values for 4 age groups in the region 1,
    and the next 4 values are for 4 age groups in the region 2.

    >>> e = np.array([30, 25, 25, 15, 33, 21, 30, 20])

    Creating another array of a population-at-risk variable (e.g., total population)
    for the same two regions.
    The order for entering values is the same as the case of e.

    >>> b = np.array([100, 100, 110, 90, 100, 90, 110, 90])

    For indirect age standardization, we also need the data for standard population and event.
    Standard population is a reference population-at-risk (e.g., population distribution for the U.S.)
    whose age distribution can be used as a benchmarking point for comparing age distributions
    across regions (e.g., popoulation distribution for Arizona and California).
    When the same concept is applied to the event variable,
    we call it standard event (e.g., the number of cancer patients in the U.S.).
    Two additional arrays including standard population and event are created.

    >>> s_e = np.array([100, 45, 120, 100, 50, 30, 200, 80])
    >>> s_b = np.array([1000, 900, 1000, 900, 1000, 900, 1000, 900])

    Specifying the number of regions.

    >>> n = 2

    Applying indirect_age_standardization function to e and b

    >>> [i[0] for i in indirect_age_standardization(e, b, s_e, s_b, n)]
    [0.23723821989528798, 0.2610803324099723]
    """
    smr = standardized_mortality_ratio(e, b, s_e, s_b, n)
    # overall crude rate of the standard population
    rate_all = sum(s_e * 1.0) / sum(s_b * 1.0)
    adjusted = rate_all * smr
    counts = sum_by_n(e, 1.0, n)
    # confidence limits are computed on the log-SMR scale and then
    # transformed back and rescaled by the standard crude rate
    log_sd = 1.0 / np.sqrt(counts)
    z = norm.ppf(1 - 0.5 * alpha)
    lower = np.exp(np.log(smr) - z * log_sd) * rate_all
    upper = np.exp(np.log(smr) + z * log_sd) * rate_all
    return list(zip(adjusted, lower, upper))
def standardized_mortality_ratio(e, b, s_e, s_b, n):
    """A utility function to compute standardized mortality ratio (SMR).

    Parameters
    ----------
    e : array
        (n*h, 1), event variable measured for each age group across n spatial units
    b : array
        (n*h, 1), population at risk variable measured for each age group across n spatial units
    s_e : array
        (n*h, 1), event variable measured for each age group across n spatial units in a standard population
    s_b : array
        (n*h, 1), population variable measured for each age group across n spatial units in a standard population
    n : integer
        the number of spatial units

    Notes
    -----
    e, b, s_e, and s_b are arranged in the same order

    Returns
    -------
    array
        (nx1)

    Examples
    --------
    Creating an array of an event variable (e.g., the number of cancer patients)
    for 2 regions in each of which 4 age groups are available.
    The first 4 values are event values for 4 age groups in the region 1,
    and the next 4 values are for 4 age groups in the region 2.

    >>> e = np.array([30, 25, 25, 15, 33, 21, 30, 20])

    Creating another array of a population-at-risk variable (e.g., total population)
    for the same two regions.
    The order for entering values is the same as the case of e.

    >>> b = np.array([100, 100, 110, 90, 100, 90, 110, 90])

    To compute standardized mortality ratio (SMR),
    we need two additional arrays for standard population and event.
    Creating s_e and s_b for standard event and population, respectively.

    >>> s_e = np.array([100, 45, 120, 100, 50, 30, 200, 80])
    >>> s_b = np.array([1000, 900, 1000, 900, 1000, 900, 1000, 900])

    Specifying the number of regions.

    >>> n = 2

    Applying indirect_age_standardization function to e and b

    >>> a, b = standardized_mortality_ratio(e, b, s_e, s_b, n)
    >>> round(a, 4)
    2.4869
    >>> round(b, 4)
    2.7368
    """
    # age-specific rates of the standard population
    standard_rate = s_e * 1.0 / s_b
    # observed counts per region vs. counts expected at standard rates
    observed = sum_by_n(e, 1.0, n)
    expected = sum_by_n(b, standard_rate, n)
    return observed * 1.0 / expected
def choynowski(e, b, n, threshold=None):
    """Choynowski map probabilities [Choynowski1959]_ .

    For each spatial unit, the Poisson tail probability of observing a
    count at least as extreme as the one recorded, given the expected
    count under the global rate.

    Parameters
    ----------
    e : array(n*h, 1)
        event variable measured for each age group across n spatial units
    b : array(n*h, 1)
        population at risk variable measured for each age group across n
        spatial units
    n : integer
        the number of spatial units
    threshold : float
        Returns zero for any p-value greater than threshold

    Notes
    -----
    e and b must be arranged in the same order.

    Returns
    -------
    : array (nx1)

    Examples
    --------
    >>> e = np.array([30, 25, 25, 15, 33, 21, 30, 20])
    >>> b = np.array([100, 100, 110, 90, 100, 90, 110, 90])
    >>> n = 2
    >>> a, b = choynowski(e, b, n)
    >>> round(a, 3)
    0.304
    >>> round(b, 3)
    0.294
    """
    events = sum_by_n(e, 1.0, n)
    pops = sum_by_n(b, 1.0, n)
    # Global rate pooled over all units.
    overall_rate = sum(events) * 1.0 / sum(pops)
    exp_counts = overall_rate * pops
    probs = []
    for obs, exp in zip(events, exp_counts):
        if obs <= exp:
            # Lower tail: probability of a count this small or smaller.
            probs.append(poisson.cdf(obs, exp))
        else:
            # Upper tail: probability of a count this large or larger.
            probs.append(1 - poisson.cdf(obs - 1, exp))
    if threshold:
        # Zero out non-significant probabilities.
        probs = [0.0 if pv >= threshold else pv for pv in probs]
    return np.array(probs)
def assuncao_rate(e, b):
    """Standardized rates based on Empirical Bayes estimates.

    The rates are standardized using the mean and standard deviation of
    aspatial Empirical Bayes rate estimates; the result is used to compute
    Moran's I corrected for rate variables.

    Parameters
    ----------
    e : array(n, 1)
        event variable measured at n spatial units
    b : array(n, 1)
        population at risk variable measured at n spatial units

    Notes
    -----
    e and b must be arranged in the same order.

    Returns
    -------
    : array (nx1)

    Examples
    --------
    >>> e = np.array([30, 25, 25, 15, 33, 21, 30, 20])
    >>> b = np.array([100, 100, 110, 90, 100, 90, 110, 90])
    >>> assuncao_rate(e, b)[:4]
    array([ 1.03843594, -0.04099089, -0.56250375, -1.73061861])
    """
    raw = e * 1.0 / b
    total_e, total_b = sum(e), sum(b)
    # Prior mean: the global rate.
    prior_mean = total_e * 1.0 / total_b
    # Population-weighted variance of raw rates around the prior mean.
    pooled_var = sum(b * ((raw - prior_mean) ** 2)) / total_b
    # Method-of-moments estimate of the between-unit variance component.
    alpha = pooled_var - prior_mean / (float(total_b) / len(e))
    # Unit-specific EB variance.
    eb_var = alpha + prior_mean / b
    return (raw - prior_mean) / np.sqrt(eb_var)
class _Smoother(object):
"""
This is a helper class that implements things that all smoothers should do.
Right now, the only thing that we need to propagate is the by_col function.
TBQH, most of these smoothers should be functions, not classes (aside from
maybe headbanging triples), since they're literally only inits + one
attribute.
"""
def __init__(self):
pass
@classmethod
def by_col(cls, df, e,b, inplace=False, **kwargs):
"""
Compute smoothing by columns in a dataframe.
Parameters
-----------
df : pandas.DataFrame
a dataframe containing the data to be smoothed
e : string or list of strings
the name or names of columns containing event variables to be
smoothed
b : string or list of strings
the name or names of columns containing the population
variables to be smoothed
inplace : bool
a flag denoting whether to output a copy of `df` with the
relevant smoothed columns appended, or to append the columns
directly to `df` itself.
**kwargs: optional keyword arguments
optional keyword options that are passed directly to the
smoother.
Returns
---------
a copy of `df` containing the columns. Or, if `inplace`, this returns
None, but implicitly adds columns to `df`.
"""
if not inplace:
new = df.copy()
cls.by_col(new, e, b, inplace=True, **kwargs)
return new
if isinstance(e, str):
e = [e]
if isinstance(b, str):
b = [b]
if len(b) == 1 and len(e) > 1:
b = b * len(e)
try:
assert len(e) == len(b)
except AssertionError:
raise ValueError('There is no one-to-one mapping between event'
' variable and population at risk variable!')
for ei, bi in zip(e,b):
ename = ei
bname = bi
ei = df[ename]
bi = df[bname]
outcol = '_'.join(('-'.join((ename, bname)), cls.__name__.lower()))
df[outcol] = cls(ei, bi, **kwargs).r
class Excess_Risk(_Smoother):
    """Excess risk (relative risk) smoother.

    For each spatial unit the raw rate is divided by the global rate, so
    a value above 1 marks a unit with more events than expected.

    Parameters
    ----------
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units

    Attributes
    ----------
    r : array (n, 1)
        excess risk values
    """
    def __init__(self, e, b):
        events = np.asarray(e).reshape(-1, 1)
        pops = np.asarray(b).reshape(-1, 1)
        # Global (pooled) rate across all units.
        overall = events.sum() * 1.0 / pops.sum()
        # Ratio of the observed count to the expected count per unit.
        self.r = events * 1.0 / (pops * overall)
class Empirical_Bayes(_Smoother):
    """Aspatial Empirical Bayes smoother.

    Raw rates are shrunk toward the global rate; units with small
    populations (noisy rates) are shrunk more than well-populated ones.

    Parameters
    ----------
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units

    Attributes
    ----------
    r : array (n, 1)
        rate values from Empirical Bayes smoothing
    """
    def __init__(self, e, b):
        events = np.asarray(e).reshape(-1, 1)
        pops = np.asarray(b).reshape(-1, 1)
        total_e = events.sum() * 1.0
        total_b = pops.sum() * 1.0
        # Prior mean: the global rate.
        prior_mean = total_e / total_b
        raw = events * 1.0 / pops
        dev = raw - prior_mean
        # Method-of-moments variance estimate; may come out negative for
        # homogeneous data, which yields full shrinkage behavior.
        var_hat = ((pops * dev * dev).sum() * 1.0 / total_b
                   - prior_mean * 1.0 / pops.mean())
        # Signal-to-(signal + noise) shrinkage weight per unit.
        shrink = var_hat / (var_hat + prior_mean / pops)
        self.r = shrink * raw + (1.0 - shrink) * prior_mean
class _Spatial_Smoother(_Smoother):
    """
    Base class for spatial smoothers.

    It shares the ``by_col`` machinery for smoothers that need a spatial
    weights object in addition to the event/population column pairs.
    """
    def __init__(self):
        pass

    @classmethod
    def by_col(cls, df, e, b, w=None, inplace=False, **kwargs):
        """
        Compute smoothing by columns in a dataframe.

        Parameters
        -----------
        df : pandas.DataFrame
            a dataframe containing the data to be smoothed
        e : string or list of strings
            the name or names of columns containing event variables to be
            smoothed
        b : string or list of strings
            the name or names of columns containing the population
            variables to be smoothed
        w : pysal.weights.W or list of pysal.weights.W
            the spatial weights object or objects to use with the
            event-population pairs. If not provided and a weights object
            is in the dataframe's metadata, that weights object will be
            used.
        inplace : bool
            a flag denoting whether to output a copy of `df` with the
            relevant smoothed columns appended, or to append the columns
            directly to `df` itself.
        **kwargs: optional keyword arguments
            optional keyword options that are passed directly to the
            smoother.

        Returns
        ---------
        a copy of `df` containing the columns. Or, if `inplace`, this returns
        None, but implicitly adds columns to `df`.
        """
        if not inplace:
            new = df.copy()
            cls.by_col(new, e, b, w=w, inplace=True, **kwargs)
            return new
        if isinstance(e, str):
            e = [e]
        if isinstance(b, str):
            b = [b]
        if w is None:
            found = False
            for k in df._metadata:
                # BUG FIX: look up the metadata attribute named `k`; the
                # original did `df.__dict__.get(w, None)`, i.e. looked up the
                # key None, so an attached weights object was never found.
                w = df.__dict__.get(k, None)
                if isinstance(w, W):
                    found = True
                    break  # first attached weights object wins
            if not found:
                raise Exception('Weights not provided and no weights attached to frame!'
                                ' Please provide a weight or attach a weight to the'
                                ' dataframe')
        # A single weights object is reused for every event column.
        if isinstance(w, W):
            w = [w] * len(e)
        if len(b) == 1 and len(e) > 1:
            b = b * len(e)
        if len(e) != len(b):
            raise ValueError('There is no one-to-one mapping between event'
                             ' variable and population at risk variable!')
        for ei, bi, wi in zip(e, b, w):
            ename = ei
            bname = bi
            ei = df[ename]
            bi = df[bname]
            outcol = '_'.join(('-'.join((ename, bname)), cls.__name__.lower()))
            df[outcol] = cls(ei, bi, w=wi, **kwargs).r
class Spatial_Empirical_Bayes(_Spatial_Smoother):
    """Spatial Empirical Bayes Smoothing

    Parameters
    ----------
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units
    w : spatial weights instance

    Attributes
    ----------
    r : array (n, 1)
        rate values from Empirical Bayes Smoothing

    Examples
    --------
    Reading data in stl_hom.csv into stl to extract values
    for event and population-at-risk variables

    >>> import libpysal
    >>> stl = libpysal.io.open(libpysal.examples.get_path('stl_hom.csv'), 'r')

    The 11th and 14th columns in stl_hom.csv includes the number of homocides and population.
    Creating two arrays from these columns.

    >>> stl_e, stl_b = np.array(stl[:,10]), np.array(stl[:,13])

    Creating a spatial weights instance by reading in stl.gal file.

    >>> stl_w = libpysal.io.open(libpysal.examples.get_path('stl.gal'), 'r').read()

    Ensuring that the elements in the spatial weights instance are ordered
    by the given sequential numbers from 1 to the number of observations in stl_hom.csv

    >>> if not stl_w.id_order_set: stl_w.id_order = range(1,len(stl) + 1)

    Creating an instance of Spatial_Empirical_Bayes class using stl_e, stl_b, and stl_w

    >>> from esda.smoothing import Spatial_Empirical_Bayes
    >>> s_eb = Spatial_Empirical_Bayes(stl_e, stl_b, stl_w)

    Extracting the risk values through the property r of s_eb

    >>> s_eb.r[:10]
    array([[4.01485749e-05],
           [3.62437513e-05],
           [4.93034844e-05],
           [5.09387329e-05],
           [3.72735210e-05],
           [3.69333797e-05],
           [5.40245456e-05],
           [2.99806055e-05],
           [3.73034109e-05],
           [3.47270722e-05]])
    """
    def __init__(self, e, b, w):
        # Rates can only be aligned with the weights when id_order mirrors
        # the ordering of e and b.
        if not w.id_order_set:
            raise ValueError("w id_order must be set to align with the order of e an b")
        e = np.asarray(e).reshape(-1,1)
        b = np.asarray(b).reshape(-1,1)
        # Local prior mean: the spatially smoothed (window) rate per unit.
        r_mean = Spatial_Rate(e, b, w).r
        # Raw (crude) rate per unit.
        rate = e * 1.0 / b
        r_var_left = np.ones_like(e) * 1.
        ngh_num = np.ones_like(e)
        # Window population: each unit's own population plus the (binary)
        # spatial lag of its neighbors' populations.
        bi = slag(w, b) + b
        for i, idv in enumerate(w.id_order):
            # The smoothing window includes the unit itself.
            ngh = list(w[idv].keys()) + [idv]
            # Translate neighbor ids to positional indices into rate/b.
            nghi = [w.id2i[k] for k in ngh]
            ngh_num[i] = len(nghi)
            # Population-weighted squared deviation of window rates from
            # the local prior mean.
            v = sum(np.square(rate[nghi] - r_mean[i]) * b[nghi])
            r_var_left[i] = v
        r_var_left = r_var_left / bi
        # Sampling-noise correction: prior mean over mean window population.
        r_var_right = r_mean / (bi / ngh_num)
        r_var = r_var_left - r_var_right
        # Negative variance estimates are truncated to zero, which shrinks
        # the raw rate all the way to the local prior mean.
        r_var[r_var < 0] = 0.0
        # Shrink toward the local prior by the signal/(signal+noise) ratio.
        self.r = r_mean + (rate - r_mean) * (r_var / (r_var + (r_mean / b)))
class Spatial_Rate(_Spatial_Smoother):
    """Spatial rate smoother.

    Each unit's rate is recomputed over a moving window made of the unit
    and its neighbors: summed events over summed populations.

    Parameters
    ----------
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units
    w : spatial weights instance

    Attributes
    ----------
    r : array (n, 1)
        rate values from spatial rate smoothing
    """
    def __init__(self, e, b, w):
        if not w.id_order_set:
            raise ValueError("w id_order must be set to align with the order of e and b")
        events = np.asarray(e).reshape(-1, 1)
        pops = np.asarray(b).reshape(-1, 1)
        # Binary transform so the lag is a plain neighbor sum.
        w.transform = 'b'
        lag_e = slag(w, events)
        lag_b = slag(w, pops)
        # Window rate: own value plus neighbor sum, events over population.
        self.r = (events + lag_e) / (pops + lag_b)
        # Restore the default (original) transform.
        w.transform = 'o'
class Kernel_Smoother(_Spatial_Smoother):
    """Kernel smoothing.

    Rates are computed as the ratio of kernel-weighted event sums to
    kernel-weighted population sums.

    Parameters
    ----------
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units
    w : Kernel weights instance

    Attributes
    ----------
    r : array (n, 1)
        rate values from kernel smoothing

    Examples
    --------
    >>> e = np.array([10, 1, 3, 4, 2, 5])
    >>> b = np.array([100, 15, 20, 20, 80, 90])
    >>> points=[(10, 10), (20, 10), (40, 10), (15, 20), (30, 20), (30, 30)]
    >>> kw=Kernel(points)
    >>> if not kw.id_order_set: kw.id_order = range(0,len(points))
    >>> kr = Kernel_Smoother(e, b, kw)
    >>> kr.r
    array([[0.10543301],
           [0.0858573 ],
           [0.08256196],
           [0.09884584],
           [0.04756872],
           [0.04845298]])
    """
    def __init__(self, e, b, w):
        # BUG FIX: the original raised `Error`, an undefined name, which
        # surfaced as a NameError instead of the intended message. Also use
        # isinstance so Kernel subclasses are accepted.
        if not isinstance(w, Kernel):
            raise ValueError('w must be an instance of Kernel weights')
        if not w.id_order_set:
            raise ValueError("w id_order must be set to align with the order of e and b")
        e = np.asarray(e).reshape(-1, 1)
        b = np.asarray(b).reshape(-1, 1)
        # Kernel-weighted local sums of events and populations.
        w_e, w_b = slag(w, e), slag(w, b)
        self.r = w_e / w_b
class Age_Adjusted_Smoother(_Spatial_Smoother):
    """Age-adjusted rate smoothing.

    Age-specific events and populations are spatially lagged with binary
    weights and then directly age-standardized against a standard
    population.

    Parameters
    ----------
    e : array (n*h, 1)
        event variable measured for each age group across n spatial units
    b : array (n*h, 1)
        population at risk variable measured for each age group across n
        spatial units
    w : spatial weights instance
    s : array (n*h, 1)
        standard population for each age group across n spatial units
    alpha : float
        significance level passed through to the standardization

    Attributes
    ----------
    r : array (n, 1)
        rate values from spatial rate smoothing

    Notes
    -----
    Weights used to smooth age-specific events and populations are simple
    binary weights.

    Examples
    --------
    >>> e = np.array([10, 8, 1, 4, 3, 5, 4, 3, 2, 1, 5, 3])
    >>> b = np.array([100, 90, 15, 30, 25, 20, 30, 20, 80, 80, 90, 60])
    >>> s = np.array([98, 88, 15, 29, 20, 23, 33, 25, 76, 80, 89, 66])
    >>> points=[(10, 10), (20, 10), (40, 10), (15, 20), (30, 20), (30, 30)]
    >>> kw=Kernel(points)
    >>> if not kw.id_order_set: kw.id_order = range(0,len(points))
    >>> ar = Age_Adjusted_Smoother(e, b, kw, s)
    >>> ar.r
    array([0.10519625, 0.08494318, 0.06440072, 0.06898604, 0.06952076,
           0.05020968])
    """
    def __init__(self, e, b, w, s, alpha=0.05):
        e = np.asarray(e).reshape(-1, 1)
        b = np.asarray(b).reshape(-1, 1)
        s = np.asarray(s).flatten()
        t = len(e)
        # Number of age groups per spatial unit.
        h = t // w.n
        # Binary transform so the lag is a plain neighbor sum.
        w.transform = 'b'
        e_n, b_n = [], []
        # Spatially lag each age group separately (stride h selects one
        # age group across all units).
        for i in range(h):
            e_n.append(slag(w, e[i::h]).tolist())
            b_n.append(slag(w, b[i::h]).tolist())
        # Interleave back to the original unit-major ordering.
        e_n = np.array(e_n).reshape((1, t), order='F')[0]
        b_n = np.array(b_n).reshape((1, t), order='F')[0]
        e_n = e_n.reshape(s.shape)
        b_n = b_n.reshape(s.shape)
        r = direct_age_standardization(e_n, b_n, s, w.n, alpha=alpha)
        # Keep only the point estimates, dropping the confidence bounds.
        self.r = np.array([i[0] for i in r])
        # Restore the default (original) transform.
        w.transform = 'o'

    @_requires('pandas')
    @classmethod
    def by_col(cls, df, e, b, w=None, s=None, **kwargs):
        """
        Compute smoothing by columns in a dataframe.

        Parameters
        -----------
        df : pandas.DataFrame
            a dataframe containing the data to be smoothed
        e : string or list of strings
            the name or names of columns containing event variables to be
            smoothed
        b : string or list of strings
            the name or names of columns containing the population
            variables to be smoothed
        w : pysal.weights.W or list of pysal.weights.W
            the spatial weights object or objects to use with the
            event-population pairs. If not provided and a weights object
            is in the dataframe's metadata, that weights object will be
            used.
        s : string or list of strings
            the name or names of columns to use as a standard population
            variable for the events `e` and at-risk populations `b`.
        **kwargs: optional keyword arguments
            optional keyword options that are passed directly to the
            smoother.

        Returns
        ---------
        a new dataframe containing the smoothed columns.
        """
        if s is None:
            raise Exception('Standard population variable "s" must be supplied.')
        import pandas as pd
        if isinstance(e, str):
            e = [e]
        if isinstance(b, str):
            b = [b]
        if isinstance(s, str):
            s = [s]
        if w is None:
            found = False
            for k in df._metadata:
                # BUG FIX: look up the metadata attribute named `k`; the
                # original did `df.__dict__.get(w, None)`, i.e. looked up the
                # key None, so attached weights were never discovered.
                w = df.__dict__.get(k, None)
                if isinstance(w, W):
                    found = True
                    break
            if not found:
                raise Exception('Weights not provided and no weights attached to frame!'
                                ' Please provide a weight or attach a weight to the'
                                ' dataframe.')
        if isinstance(w, W):
            w = [w] * len(e)
        if not all(isinstance(wi, W) for wi in w):
            raise Exception('Weights object must be an instance of '
                            ' libpysal.weights.W!')
        b = b * len(e) if len(b) == 1 and len(e) > 1 else b
        s = s * len(e) if len(s) == 1 and len(e) > 1 else s
        if not (len(e) == len(b) == len(s) == len(w)):
            raise ValueError('There is no one-to-one mapping between event'
                             ' variable and population at risk variable, and '
                             ' standard population variable, and spatial '
                             ' weights!')
        rdf = []
        max_len = 0
        for ei, bi, wi, si in zip(e, b, w, s):
            outcol = '_'.join(('-'.join((ei, bi)), cls.__name__.lower()))
            this_r = cls(df[ei], df[bi], w=wi, s=df[si], **kwargs).r
            # BUG FIX: track the longest result; the original reset max_len
            # to 0 whenever a longer result appeared.
            max_len = max(max_len, len(this_r))
            rdf.append((outcol, this_r.tolist()))
        # BUG FIX: pad each column only up to max_len; the original appended
        # max_len Nones to every column, misaligning column lengths.
        padded = (vals + [None] * (max_len - len(vals)) for _, vals in rdf)
        rdf = list(zip((name for name, _ in rdf), padded))
        # pd.DataFrame.from_items was removed in pandas 1.0; a dict keeps
        # the insertion order of the column pairs.
        return pd.DataFrame(dict(rdf))
class Disk_Smoother(_Spatial_Smoother):
    """Locally weighted averages (disk smoothing).

    Each unit's rate is replaced by the weighted average of the raw rates
    inside its neighborhood, normalized by the total neighbor weight.

    Parameters
    ----------
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units
    w : spatial weights matrix

    Attributes
    ----------
    r : array (n, 1)
        rate values from disk smoothing
    """
    def __init__(self, e, b, w):
        if not w.id_order_set:
            raise ValueError("w id_order must be set to align with the order of e and b")
        events = np.asarray(e).reshape(-1, 1)
        pops = np.asarray(b).reshape(-1, 1)
        # Raw (crude) rate per unit.
        rates = events * 1.0 / pops
        # Total neighbor weight per unit, in id_order.
        totals = np.array([sum(w.weights[idx]) for idx in w.id_order])
        self.r = slag(w, rates) / totals.reshape(-1, 1)
class Spatial_Median_Rate(_Spatial_Smoother):
    """Spatial median rate smoothing.

    Each unit's rate is replaced by the (optionally weighted) median of
    the rates in its neighborhood, repeated ``iteration`` times.

    Parameters
    ----------
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units
    w : spatial weights instance
    aw : array (n, 1)
        auxiliary weight variable measured across n spatial units; when
        given, a weighted median is used instead of a plain median
    iteration : integer
        the number of smoothing iterations

    Attributes
    ----------
    r : array (n, 1)
        rate values from spatial median rate smoothing
    w : spatial weights instance
    aw : array (n, 1)
        auxiliary weight variable measured across n spatial units
    """
    def __init__(self, e, b, w, aw=None, iteration=1):
        if not w.id_order_set:
            raise ValueError("w id_order must be set to align with the order of e and b")
        events = np.asarray(e).flatten()
        pops = np.asarray(b).flatten()
        # Start from the raw rates and smooth repeatedly.
        self.r = events * 1.0 / pops
        self.aw, self.w = aw, w
        for _ in range(iteration):
            self.__smooth_once()

    def __smooth_once(self):
        # One pass of median smoothing over every unit's neighborhood
        # (the unit itself is always part of its own window).
        rates, aux, w = self.r, self.aw, self.w
        smoothed = []
        if aux is None:
            for pos, key in enumerate(w.id_order):
                window = np.append(rates[pos], rates[w.neighbor_offsets[key]])
                smoothed.append(np.median(window))
        else:
            for pos, key in enumerate(w.id_order):
                offsets = [pos] + list(w.neighbor_offsets[key])
                smoothed.append(weighted_median(rates[offsets], aux[offsets]))
        self.r = np.asarray(smoothed).reshape(rates.shape)
class Spatial_Filtering(_Smoother):
    """Spatial filtering.

    Rates are estimated at the points of a regular grid laid over the
    study area, using either a fixed-radius disk around each grid point
    or an adaptive window that grows until a population threshold is met.

    Parameters
    ----------
    bbox : a list of two lists where each list is a pair of coordinates
        a bounding box for the entire n spatial units
    data : array (n, 2)
        x, y coordinates
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units
    x_grid : integer
        the number of cells on x axis
    y_grid : integer
        the number of cells on y axis
    r : float
        fixed radius of a moving window
    pop : integer
        population threshold to create adaptive moving windows

    Attributes
    ----------
    grid : array (x_grid*y_grid, 2)
        x, y coordinates for grid points
    r : array (x_grid*y_grid, 1)
        rate values for grid points

    Notes
    -----
    No tool is provided to find an optimal value for r or pop.
    """
    def __init__(self, bbox, data, e, b, x_grid, y_grid, r=None, pop=None):
        events = np.asarray(e).reshape(-1, 1)
        pops = np.asarray(b).reshape(-1, 1)
        data_tree = KDTree(data)
        x_range = bbox[1][0] - bbox[0][0]
        y_range = bbox[1][1] - bbox[0][1]
        # Regular mesh of grid points covering the bounding box.
        x, y = np.mgrid[bbox[0][0]:bbox[1][0]:float(x_range) / x_grid,
                        bbox[0][1]:bbox[1][1]:float(y_range) / y_grid]
        self.grid = list(zip(x.ravel(), y.ravel()))
        rates = []
        if r is None and pop is None:
            raise ValueError("Either r or pop should not be None")
        if r is not None:
            # Fixed window: pool all points within distance r of each grid
            # point. (BUG FIX: the original rebound the parameter `r` as the
            # loop accumulator, shadowing it.)
            for idx in data_tree.query_ball_point(self.grid, r=r):
                rates.append(events[idx].sum() * 1.0 / pops[idx].sum())
        if pop is not None:
            # Adaptive window: take nearest points until the cumulative
            # population exceeds the threshold.
            nearest = data_tree.query(self.grid, k=len(events))[1]
            for idx in nearest:
                cum_e, cum_b = events[idx].cumsum(), pops[idx].cumsum()
                within = cum_b <= pop
                sel_e, sel_b = cum_e[within], cum_b[within]
                if len(sel_e) == 0:
                    # Even the closest point exceeds the threshold; use it
                    # alone so the rate is always defined.
                    sel_e = cum_e[[0]]
                    sel_b = cum_b[[0]]
                rates.append(sel_e[-1] * 1.0 / sel_b[-1])
        self.r = np.array(rates)

    @_requires('pandas')
    @classmethod
    def by_col(cls, df, e, b, x_grid, y_grid, geom_col='geometry', **kwargs):
        """
        Compute smoothing by columns in a dataframe. The bounding box and point
        information is computed from the geometry column.

        Parameters
        -----------
        df : pandas.DataFrame
            a dataframe containing the data to be smoothed
        e : string or list of strings
            the name or names of columns containing event variables to be
            smoothed
        b : string or list of strings
            the name or names of columns containing the population
            variables to be smoothed
        x_grid : integer
            number of grid cells to use along the x-axis
        y_grid : integer
            number of grid cells to use along the y-axis
        geom_col: string
            the name of the column in the dataframe containing the
            geometry information.
        **kwargs: optional keyword arguments
            optional keyword options that are passed directly to the
            smoother.

        Returns
        ---------
        a new dataframe of dimension (x_grid*y_grid, 3), containing the
        coordinates of the grid cells and the rates associated with those grid
        cells.
        """
        import pandas as pd
        # Prep for application over multiple event/population pairs.
        if isinstance(e, str):
            e = [e]
        if isinstance(b, str):
            b = [b]
        if len(e) > len(b):
            b = b * len(e)
        if isinstance(x_grid, (int, float)):
            x_grid = [x_grid] * len(e)
        if isinstance(y_grid, (int, float)):
            y_grid = [y_grid] * len(e)
        bbox = get_bounding_box(df[geom_col])
        bbox = [[bbox.left, bbox.lower], [bbox.right, bbox.upper]]
        data = get_points_array(df[geom_col])
        res = []
        for ename, bname, xgi, ygi in zip(e, b, x_grid, y_grid):
            smoothed = cls(bbox, data, df[ename], df[bname], xgi, ygi, **kwargs)
            grid = np.asarray(smoothed.grid).reshape(-1, 2)
            name = '_'.join(('-'.join((ename, bname)), cls.__name__.lower()))
            colnames = ('_'.join((name, suffix)) for suffix in ['X', 'Y', 'R'])
            items = [(cname, col) for cname, col in zip(colnames,
                                                        [grid[:, 0],
                                                         grid[:, 1],
                                                         smoothed.r])]
            # BUG FIX: pd.DataFrame.from_items was removed in pandas 1.0;
            # a dict keeps the insertion order of the column pairs.
            res.append(pd.DataFrame(dict(items)))
        outdf = pd.concat(res)
        return outdf
class Headbanging_Triples(object):
    """Generate a pseudo spatial weights instance that contains headbanging triples.

    For each observation, the triples are pairs of neighbors that are roughly
    on opposite sides of it (the angle they subtend at the observation exceeds
    the ``angle`` threshold); they are the screening sets used by
    headbanging median smoothing.

    NOTE: the constructor raises ``DeprecationWarning`` unconditionally, so
    this class is effectively disabled; the code below the raise is retained
    for reference only.

    Parameters
    ----------
    data : array (n, 2)
        numpy array of x, y coordinates
    w : spatial weights instance
        expected to be a nearest-neighbors weights object
    k : integer
        number of nearest neighbors
    t : integer
        the number of triples
    angle : integer between 0 and 180
        the angle criterium for a set of triples
    edgecor : boolean
        whether or not correction for edge points is made

    Attributes
    ----------
    triples : dictionary
        key is observation record id, value is a list of lists of triple ids
    extra : dictionary
        (only when ``edgecor`` is True) key is observation record id,
        value is a list of the following:
        tuple of original triple observations,
        distance between original triple observations,
        distance between an original triple observation and its extrapolated point

    Examples
    --------
    >>> import libpysal # doctest: +SKIP
    >>> stl_db = libpysal.io.open(libpysal.examples.get_path('stl_hom.csv'),'r') # doctest: +SKIP
    >>> from libpysal.io.util.wkt import WKTParser # doctest: +SKIP
    >>> fromWKT = WKTParser() # doctest: +SKIP
    >>> stl_db.cast('WKT',fromWKT) # doctest: +SKIP
    >>> d = np.array([i.centroid for i in stl_db[:,0]]) # doctest: +SKIP
    >>> w = libpysal.weights.KNN(d,k=5) # doctest: +SKIP
    >>> if not w.id_order_set: w.id_order = w.id_order # doctest: +SKIP
    >>> ht = Headbanging_Triples(d,w,k=5) # doctest: +SKIP
    >>> for k, item in ht.triples.items()[:5]: print(k, item) # doctest: +SKIP
    0 [(5, 6), (10, 6)]
    1 [(4, 7), (4, 14), (9, 7)]
    2 [(0, 8), (10, 3), (0, 6)]
    3 [(4, 2), (2, 12), (8, 4)]
    4 [(8, 1), (12, 1), (8, 9)]
    """
    def __init__(self, data, w, k=5, t=3, angle=135.0, edgecor=False):
        # Deliberately disabled: every construction attempt raises, so all
        # code after this line is unreachable.
        raise DeprecationWarning('Deprecated')
        if k < 3:
            raise ValueError("w should be NeareastNeighbors instance & the number of neighbors should be more than 3.")
        if not w.id_order_set:
            raise ValueError("w id_order must be set to align with the order of data")
        self.triples, points = {}, {}
        # points maps (id, Point) -> {neighbor_id: neighbor Point}
        for i, pnt in enumerate(data):
            ng = w.neighbor_offsets[i]
            points[(i, Point(pnt))] = dict(list(zip(ng, [Point(d)
                                                         for d in data[ng]])))
        for i, pnt in list(points.keys()):
            ng = points[(i, pnt)]
            tr, tr_dis = {}, []
            # examine every pair of neighbors; keep pairs whose angle at the
            # focal point exceeds the threshold (they "straddle" the point)
            for c in comb(list(ng.keys()), 2):
                p2, p3 = ng[c[0]], ng[c[-1]]
                ang = get_angle_between(Ray(pnt, p2), Ray(pnt, p3))
                if ang > angle or (ang < 0.0 and ang + 360 > angle):
                    tr[tuple(c)] = (p2, p3)
            if len(tr) > t:
                # more than t candidates: keep the t triples whose connecting
                # segment passes closest to the focal point
                for c in list(tr.keys()):
                    p2, p3 = tr[c]
                    tr_dis.append((get_segment_point_dist(
                        LineSegment(p2, p3), pnt), c))
                tr_dis = sorted(tr_dis)[:t]
                self.triples[i] = [trp for dis, trp in tr_dis]
            else:
                self.triples[i] = list(tr.keys())
        if edgecor:
            # Edge correction: points on the convex hull may have no valid
            # triples; synthesize one by extrapolating a pseudo-neighbor.
            self.extra = {}
            ps = dict([(p, i) for i, p in list(points.keys())])
            chull = convex_hull(list(ps.keys()))
            chull = [p for p in chull if len(self.triples[ps[p]]) == 0]
            for point in chull:
                key = (ps[point], point)
                ng = points[key]
                ng_dist = [(get_points_dist(point, p), p) for p in list(ng.values())]
                # process neighbors from nearest to farthest (pop() off the
                # end of a descending sort)
                ng_dist_s = sorted(ng_dist, reverse=True)
                extra = None
                while extra is None and len(ng_dist_s) > 0:
                    p2 = ng_dist_s.pop()[-1]
                    p3s = list(ng.values())
                    p3s.remove(p2)
                    for p3 in p3s:
                        dist_p2_p3 = get_points_dist(p2, p3)
                        dist_p_p2 = get_points_dist(point, p2)
                        dist_p_p3 = get_points_dist(point, p3)
                        # anchor the extrapolation ray at the nearer of the
                        # two candidate neighbors
                        if dist_p_p2 <= dist_p_p3:
                            ray1, ray2, s_pnt, dist, c = Ray(p2, point), Ray(p2, p3), p2, dist_p_p2, (ps[p2], ps[p3])
                        else:
                            ray1, ray2, s_pnt, dist, c = Ray(p3, point), Ray(p3, p2), p3, dist_p_p3, (ps[p3], ps[p2])
                        ang = get_angle_between(ray1, ray2)
                        if ang >= 90 + angle / 2 or (ang < 0 and ang + 360 >= 90 + angle / 2):
                            ex_point = get_point_at_angle_and_dist(
                                ray1, angle, dist)
                            extra = [c, dist_p2_p3, get_points_dist(
                                s_pnt, ex_point)]
                            break
                # NOTE(review): if no pair satisfies the angle test, extra
                # stays None and the append below raises TypeError.
                self.triples[ps[point]].append(extra[0])
                self.extra[ps[point]] = extra
class Headbanging_Median_Rate(object):
    """Headbanging Median Rate Smoothing.

    NOTE: the constructor raises ``DeprecationWarning`` unconditionally, so
    this class is effectively disabled; the code below the raise is retained
    for reference only.

    Parameters
    ----------
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units
    t : Headbanging_Triples instance
    aw : array (n, 1)
        auxiliary weight variable measured across n spatial units
    iteration : integer
        the number of iterations

    Attributes
    ----------
    r : array (n, 1)
        rate values from headbanging median smoothing

    Examples
    --------
    >>> import libpysal # doctest: +SKIP
    >>> sids = libpysal.io.open(libpysal.examples.get_path('sids2.shp'), 'r') # doctest: +SKIP
    >>> sids_d = np.array([i.centroid for i in sids]) # doctest: +SKIP
    >>> sids_w = libpysal.weights.KNN(sids_d,k=5) # doctest: +SKIP
    >>> if not sids_w.id_order_set: sids_w.id_order = sids_w.id_order # doctest: +SKIP
    >>> s_ht = Headbanging_Triples(sids_d,sids_w,k=5) # doctest: +SKIP
    >>> sids_db = libpysal.io.open(libpysal.examples.get_path('sids2.dbf'), 'r') # doctest: +SKIP
    >>> s_e, s_b = np.array(sids_db[:,9]), np.array(sids_db[:,8]) # doctest: +SKIP
    >>> sids_hb_r = Headbanging_Median_Rate(s_e,s_b,s_ht) # doctest: +SKIP
    >>> sids_hb_r.r[:5] # doctest: +SKIP
    array([ 0.00075586, 0.        , 0.0008285 , 0.0018315 , 0.00498891])
    >>> sids_hb_r2 = Headbanging_Median_Rate(s_e,s_b,s_ht,iteration=5) # doctest: +SKIP
    >>> sids_hb_r2.r[:5] # doctest: +SKIP
    array([ 0.0008285 , 0.00084331, 0.00086896, 0.0018315 , 0.00498891])
    >>> sids_hb_r3 = Headbanging_Median_Rate(s_e,s_b,s_ht,aw=s_b) # doctest: +SKIP
    >>> sids_hb_r3.r[:5] # doctest: +SKIP
    array([ 0.00091659, 0.        , 0.00156838, 0.0018315 , 0.00498891])
    """
    def __init__(self, e, b, t, aw=None, iteration=1):
        # Deliberately disabled: every construction attempt raises, so all
        # code after this line is unreachable.
        raise DeprecationWarning('Deprecated')
        self.r = e * 1.0 / b
        self.tr, self.aw = t.triples, aw
        if hasattr(t, 'extra'):
            self.extra = t.extra
        while iteration:
            self.__search_headbanging_median()
            iteration -= 1

    def __get_screens(self, id, triples, weighted=False):
        """Return the (rate, low-screen, high-screen[, weights]) tuple used
        to decide the smoothed value for observation ``id``."""
        r, tr = self.r, self.tr
        if len(triples) == 0:
            # no triples (untreated edge point): the raw rate passes through
            return r[id]
        if hasattr(self, 'extra') and id in self.extra:
            # edge-corrected point: one real neighbor plus an extrapolated one
            extra = self.extra
            trp_r = r[list(triples[0])]
            # observed rate plus difference in rate scaled by the ratio of
            # extrapolated distance to observed distance.
            trp_r[-1] = trp_r[0] + (trp_r[0] - trp_r[-1]) * (
                extra[id][-1] * 1.0 / extra[id][1])
            trp_r = sorted(trp_r)
            if not weighted:
                return r[id], trp_r[0], trp_r[-1]
            else:
                # FIX: index with a list — indexing a 1-D array with the raw
                # tuple would be interpreted as a multi-dimensional index and
                # raise IndexError.
                trp_aw = self.aw[list(triples[0])]
                extra_w = trp_aw[0] + (trp_aw[0] - trp_aw[-1]) * (
                    extra[id][-1] * 1.0 / extra[id][1])
                return r[id], trp_r[0], trp_r[-1], self.aw[id], trp_aw[0] + extra_w
        if not weighted:
            # screens are the medians of the per-triple minima and maxima
            lowest, highest = [], []
            for trp in triples:
                trp_r = np.sort(r[list(trp)])
                lowest.append(trp_r[0])
                highest.append(trp_r[-1])
            return r[id], np.median(np.array(lowest)), np.median(np.array(highest))
        if weighted:
            lowest, highest = [], []
            lowest_aw, highest_aw = [], []
            for trp in triples:
                trp_r = r[list(trp)]
                # sort rate/member-id pairs together so the auxiliary weight
                # follows its rate
                dtype = [('r', '%s' % trp_r.dtype), ('w',
                                                     '%s' % self.aw.dtype)]
                trp_r = np.array(list(zip(trp_r, list(trp))), dtype=dtype)
                trp_r.sort(order='r')
                lowest.append(trp_r['r'][0])
                highest.append(trp_r['r'][-1])
                lowest_aw.append(self.aw[int(round(trp_r['w'][0]))])
                highest_aw.append(self.aw[int(round(trp_r['w'][-1]))])
            wm_lowest = weighted_median(np.array(lowest), np.array(lowest_aw))
            wm_highest = weighted_median(
                np.array(highest), np.array(highest_aw))
            triple_members = flatten(triples, unique=False)
            return r[id], wm_lowest, wm_highest, self.aw[id] * len(triples), self.aw[triple_members].sum()

    def __get_median_from_screens(self, screens):
        """Collapse a screen tuple (or a pass-through scalar) to one value."""
        if isinstance(screens, float):
            return screens
        elif len(screens) == 3:
            return np.median(np.array(screens))
        elif len(screens) == 5:
            # weighted case: only move the rate to a screen when the triple
            # weight mass (w2) dominates the focal weight (w1)
            rk, wm_lowest, wm_highest, w1, w2 = screens
            if rk >= wm_lowest and rk <= wm_highest:
                return rk
            elif rk < wm_lowest and w1 < w2:
                return wm_lowest
            elif rk > wm_highest and w1 < w2:
                return wm_highest
            else:
                return rk

    def __search_headbanging_median(self):
        """One smoothing pass: rebuild ``self.r`` from the screened medians."""
        r, tr = self.r, self.tr
        new_r = []
        for k in list(tr.keys()):
            screens = self.__get_screens(
                k, tr[k], weighted=(self.aw is not None))
            new_r.append(self.__get_median_from_screens(screens))
        self.r = np.array(new_r)

    @_requires('pandas')
    @classmethod
    def by_col(cls, df, e, b, t=None, geom_col='geometry', inplace=False, **kwargs):
        """
        Compute smoothing by columns in a dataframe. The bounding box and point
        information is computed from the geometry column.

        Parameters
        -----------
        df : pandas.DataFrame
            a dataframe containing the data to be smoothed
        e : string or list of strings
            the name or names of columns containing event variables to be
            smoothed
        b : string or list of strings
            the name or names of columns containing the population
            variables to be smoothed
        t : Headbanging_Triples instance or list of Headbanging_Triples
            list of headbanging triples instances. If not provided, this
            is computed from the geometry column of the dataframe.
        geom_col : string
            the name of the column in the dataframe containing the
            geometry information.
        inplace : bool
            a flag denoting whether to output a copy of `df` with the
            relevant smoothed columns appended, or to append the columns
            directly to `df` itself.
        **kwargs : optional keyword arguments
            optional keyword options that are passed directly to the
            smoother.

        Returns
        ---------
        a new dataframe containing the smoothed Headbanging Median Rates for
        the event/population pairs. If done inplace, there is no return value
        and `df` is modified in place.
        """
        import pandas as pd
        if not inplace:
            new = df.copy()
            cls.by_col(new, e, b, t=t, geom_col=geom_col, inplace=True, **kwargs)
            return new
        # prep for application over multiple event/population pairs
        if isinstance(e, str):
            e = [e]
        if isinstance(b, str):
            b = [b]
        if len(e) > len(b):
            b = b * len(e)
        data = get_points_array(df[geom_col])
        # Headbanging_Triples doesn't take **kwargs, so filter its arguments
        # (self, data, w, k=5, t=3, angle=135.0, edgecor=False):
        w = kwargs.pop('w', None)
        if w is None:
            found = False
            for key in df._metadata:
                # FIX: look up the metadata attribute name — the original
                # passed `w` (still None) as the key, so an attached weights
                # object was never found.
                w = df.__dict__.get(key, None)
                if isinstance(w, W):
                    found = True
                    # FIX: stop searching so a later metadata entry does not
                    # clobber the weights we just found.
                    break
            if not found:
                raise Exception('Weights not provided and no weights attached to frame!'
                                ' Please provide a weight or attach a weight to the'
                                ' dataframe')
        k = kwargs.pop('k', 5)
        t = kwargs.pop('t', 3)
        angle = kwargs.pop('angle', 135.0)
        edgecor = kwargs.pop('edgecor', False)
        hbt = Headbanging_Triples(data, w, k=k, t=t, angle=angle,
                                  edgecor=edgecor)
        res = []
        for ename, bname in zip(e, b):
            r = cls(df[ename], df[bname], hbt, **kwargs).r
            name = '_'.join(('-'.join((ename, bname)), cls.__name__.lower()))
            df[name] = r
|
def direct_age_standardization(e, b, s, n, alpha=0.05):
    """A utility function to compute rate through direct age standardization

    Parameters
    ----------
    e : array
        (n*h, 1), event variable measured for each age group across n spatial units
    b : array
        (n*h, 1), population at risk variable measured for each age group across n spatial units
    s : array
        (n*h, 1), standard population for each age group across n spatial units
    n : integer
        the number of spatial units
    alpha : float
        significance level for confidence interval

    Notes
    -----
    e, b, and s are arranged in the same order

    Returns
    -------
    list
        a list of n tuples; a tuple has a rate and its lower and upper limits
        age standardized rates and confidence intervals

    Examples
    --------
    >>> e = np.array([30, 25, 25, 15, 33, 21, 30, 20])
    >>> b = np.array([1000, 1000, 1100, 900, 1000, 900, 1100, 900])
    >>> s = np.array([1000, 900, 1000, 900, 1000, 900, 1000, 900])
    >>> n = 2
    >>> a, b = [i[0] for i in direct_age_standardization(e, b, s, n)]
    >>> round(a, 4)
    0.0237
    >>> round(b, 4)
    0.0267
    """
    # share of the standard population in each age group, divided by the
    # local population at risk: weights for the rate sum below
    age_weight = (1.0 / b) * (s * 1.0 / sum_by_n(s, 1.0, n).repeat(len(s) // n))
    adjusted_r = sum_by_n(e, age_weight, n)
    var_estimate = sum_by_n(e, np.square(age_weight), n)
    # gamma-distribution parameters for the Fay-Feuer style interval
    g_a = np.square(adjusted_r) / var_estimate
    g_b = var_estimate / adjusted_r
    k = [age_weight[i:i + len(b) // n].max() for i in range(0, len(b),
                                                            len(b) // n)]
    g_a_k = np.square(adjusted_r + k) / (var_estimate + np.square(k))
    g_b_k = (var_estimate + np.square(k)) / (adjusted_r + k)
    res = []
    for i in range(len(adjusted_r)):
        if adjusted_r[i] == 0:
            # FIX: chi2.ppf requires the degrees-of-freedom argument; the
            # original call omitted it and raised TypeError whenever a unit
            # had a zero adjusted rate. The exact upper limit for a zero
            # Poisson count uses df=2.
            upper = 0.5 * chi2.ppf(1 - 0.5 * alpha, 2)
            lower = 0.0
        else:
            lower = gamma.ppf(0.5 * alpha, g_a[i], scale=g_b[i])
            upper = gamma.ppf(1 - 0.5 * alpha, g_a_k[i], scale=g_b_k[i])
        res.append((adjusted_r[i], lower, upper))
    return res
"def sum_by_n(d, w, n):\n \"\"\"A utility function to summarize a data array into n values\n after weighting the array with another weight array w\n\n Parameters\n ----------\n d : array\n (t, 1), numerical values\n w : array\n (t, 1), numerical... | from __future__ import division
"""
Apply smoothing to rate computation
[Longer Description]
Author(s):
Myunghwa Hwang mhwang4@gmail.com
David Folch dfolch@asu.edu
Luc Anselin luc.anselin@asu.edu
Serge Rey srey@asu.edu
"""
__author__ = "Myunghwa Hwang <mhwang4@gmail.com>, David Folch <dfolch@asu.edu>, Luc Anselin <luc.anselin@asu.edu>, Serge Rey <srey@asu.edu"
from libpysal.weights.weights import W
from libpysal.weights.distance import Kernel
from libpysal.weights.util import get_points_array, comb
from libpysal.cg import Point, Ray, LineSegment
from libpysal.cg import get_angle_between, get_points_dist, get_segment_point_dist,\
get_point_at_angle_and_dist, convex_hull, get_bounding_box
from libpysal.common import np, KDTree, requires as _requires
from libpysal.weights.spatial_lag import lag_spatial as slag
from scipy.stats import gamma, norm, chi2, poisson
from functools import reduce
import doctest
__all__ = ['Excess_Risk', 'Empirical_Bayes', 'Spatial_Empirical_Bayes', 'Spatial_Rate', 'Kernel_Smoother', 'Age_Adjusted_Smoother', 'Disk_Smoother', 'Spatial_Median_Rate', 'Spatial_Filtering', 'Headbanging_Triples', 'Headbanging_Median_Rate', 'flatten', 'weighted_median', 'sum_by_n', 'crude_age_standardization', 'direct_age_standardization', 'indirect_age_standardization', 'standardized_mortality_ratio', 'choynowski', 'assuncao_rate']
def flatten(l, unique=True):
    """flatten a list of lists

    Parameters
    ----------
    l : list
        of lists
    unique : boolean
        whether or not only unique items are wanted (default=True)

    Returns
    -------
    list
        of single items

    Examples
    --------
    Creating a sample list whose elements are lists of integers

    >>> l = [[1, 2], [3, 4, ], [5, 6]]

    Applying flatten function

    >>> flatten(l)
    [1, 2, 3, 4, 5, 6]
    """
    # A single comprehension is O(n); the previous reduce(+) chain was
    # quadratic and raised TypeError on an empty input list.
    flat = [item for sub in l for item in sub]
    if not unique:
        return flat
    # NOTE: set() does not preserve the original ordering of items.
    return list(set(flat))
def weighted_median(d, w):
    """A utility function to find a median of d based on w

    Parameters
    ----------
    d : array
        (n, 1), variable for which median will be found
    w : array
        (n, 1), variable on which d's median will be decided

    Notes
    -----
    d and w are arranged in the same order

    Returns
    -------
    float
        median of d

    Examples
    --------
    >>> d = np.array([5,4,3,1,2])
    >>> w = np.array([10, 22, 9, 2, 5])
    >>> weighted_median(d, w)
    4
    """
    # sort the (weight, value) pairs by value so weights can be accumulated
    # along the ordered data
    pairs = np.array(list(zip(w, d)),
                     dtype=[('w', '%s' % w.dtype), ('v', '%s' % d.dtype)])
    pairs.sort(order='v')
    cum_w = pairs['w'].cumsum()
    half = cum_w[-1] * 1.0 / 2
    # first position where the accumulated weight reaches half the total
    idx = (cum_w >= half).nonzero()[0][0]
    ordered = np.sort(d)
    # exact tie at the halfway mass: average the two straddling values
    if cum_w[idx] == half and idx < len(d) - 1:
        return ordered[idx:idx + 2].mean()
    return ordered[idx]
def sum_by_n(d, w, n):
    """A utility function to summarize a data array into n values
    after weighting the array with another weight array w

    Parameters
    ----------
    d : array
        (t, 1), numerical values
    w : array
        (t, 1), numerical values for weighting
    n : integer
        the number of groups
        t = c*n (c is a constant)

    Returns
    -------
    : array
        (n, 1), an array with summarized values

    Examples
    --------
    >>> d = np.array([10, 9, 20, 30])
    >>> w = np.array([0.5, 0.1, 0.3, 0.8])
    >>> n = 2
    >>> sum_by_n(d, w, n)
    array([ 5.9, 30. ])
    """
    total = len(d)
    size = total // n  # group width; t is assumed to be a multiple of n
    weighted = d * w
    return np.array([weighted[start:start + size].sum()
                     for start in range(0, total, size)])
def crude_age_standardization(e, b, n):
    """A utility function to compute rate through crude age standardization

    Parameters
    ----------
    e : array
        (n*h, 1), event variable measured for each age group across n spatial units
    b : array
        (n*h, 1), population at risk variable measured for each age group across n spatial units
    n : integer
        the number of spatial units

    Notes
    -----
    e and b are arranged in the same order

    Returns
    -------
    : array
        (n, 1), age standardized rate

    Examples
    --------
    >>> e = np.array([30, 25, 25, 15, 33, 21, 30, 20])
    >>> b = np.array([100, 100, 110, 90, 100, 90, 110, 90])
    >>> n = 2
    >>> crude_age_standardization(e, b, n)
    array([0.2375    , 0.26666667])
    """
    # per-group raw rates, weighted by each group's share of its region's
    # total population
    rates = e * 1.0 / b
    pop_by_region = sum_by_n(b, 1.0, n)
    weights = b * 1.0 / pop_by_region.repeat(len(e) // n)
    return sum_by_n(rates, weights, n)
def indirect_age_standardization(e, b, s_e, s_b, n, alpha=0.05):
    """A utility function to compute rate through indirect age standardization

    Parameters
    ----------
    e : array
        (n*h, 1), event variable measured for each age group across n spatial units
    b : array
        (n*h, 1), population at risk variable measured for each age group across n spatial units
    s_e : array
        (n*h, 1), event variable measured for each age group across n spatial units in a standard population
    s_b : array
        (n*h, 1), population variable measured for each age group across n spatial units in a standard population
    n : integer
        the number of spatial units
    alpha : float
        significance level for confidence interval

    Notes
    -----
    e, b, s_e, and s_b are arranged in the same order

    Returns
    -------
    list
        a list of n tuples; a tuple has a rate and its lower and upper limits
        age standardized rate

    Examples
    --------
    >>> e = np.array([30, 25, 25, 15, 33, 21, 30, 20])
    >>> b = np.array([100, 100, 110, 90, 100, 90, 110, 90])
    >>> s_e = np.array([100, 45, 120, 100, 50, 30, 200, 80])
    >>> s_b = np.array([1000, 900, 1000, 900, 1000, 900, 1000, 900])
    >>> n = 2
    >>> [i[0] for i in indirect_age_standardization(e, b, s_e, s_b, n)]
    [0.23723821989528798, 0.2610803324099723]
    """
    smr = standardized_mortality_ratio(e, b, s_e, s_b, n)
    overall_rate = sum(s_e * 1.0) / sum(s_b * 1.0)
    adjusted = overall_rate * smr
    events_by_region = sum_by_n(e, 1.0, n)
    # normal-approximation interval built on the log scale, then mapped back
    log_smr = np.log(smr)
    se_log = 1.0 / np.sqrt(events_by_region)
    z = norm.ppf(1 - 0.5 * alpha)
    lower = np.exp(log_smr - z * se_log) * overall_rate
    upper = np.exp(log_smr + z * se_log) * overall_rate
    return list(zip(adjusted, lower, upper))
def standardized_mortality_ratio(e, b, s_e, s_b, n):
    """A utility function to compute standardized mortality ratio (SMR).

    Parameters
    ----------
    e : array
        (n*h, 1), event variable measured for each age group across n spatial units
    b : array
        (n*h, 1), population at risk variable measured for each age group across n spatial units
    s_e : array
        (n*h, 1), event variable measured for each age group across n spatial units in a standard population
    s_b : array
        (n*h, 1), population variable measured for each age group across n spatial units in a standard population
    n : integer
        the number of spatial units

    Notes
    -----
    e, b, s_e, and s_b are arranged in the same order

    Returns
    -------
    array
        (nx1)

    Examples
    --------
    >>> e = np.array([30, 25, 25, 15, 33, 21, 30, 20])
    >>> b = np.array([100, 100, 110, 90, 100, 90, 110, 90])
    >>> s_e = np.array([100, 45, 120, 100, 50, 30, 200, 80])
    >>> s_b = np.array([1000, 900, 1000, 900, 1000, 900, 1000, 900])
    >>> n = 2
    >>> a, b = standardized_mortality_ratio(e, b, s_e, s_b, n)
    >>> round(a, 4)
    2.4869
    >>> round(b, 4)
    2.7368
    """
    # SMR = observed events / events expected at the standard age-specific rates
    standard_rates = s_e * 1.0 / s_b
    observed = sum_by_n(e, 1.0, n)
    expected = sum_by_n(b, standard_rates, n)
    return observed * 1.0 / expected
def choynowski(e, b, n, threshold=None):
    """Choynowski map probabilities [Choynowski1959]_ .

    Parameters
    ----------
    e : array(n*h, 1)
        event variable measured for each age group across n spatial units
    b : array(n*h, 1)
        population at risk variable measured for each age group across n spatial units
    n : integer
        the number of spatial units
    threshold : float
        Returns zero for any p-value greater than threshold

    Notes
    -----
    e and b are arranged in the same order

    Returns
    -------
    : array (nx1)

    Examples
    --------
    >>> e = np.array([30, 25, 25, 15, 33, 21, 30, 20])
    >>> b = np.array([100, 100, 110, 90, 100, 90, 110, 90])
    >>> n = 2
    >>> a,b = choynowski(e, b, n)
    >>> round(a, 3)
    0.304
    >>> round(b, 3)
    0.294
    """
    observed = sum_by_n(e, 1.0, n)
    population = sum_by_n(b, 1.0, n)
    overall_rate = sum(observed) * 1.0 / sum(population)
    expected = overall_rate * population
    probs = []
    # one-sided Poisson tail probability, taken on the side where the
    # observed count lies relative to its expectation
    for obs, exp in zip(observed, expected):
        if obs <= exp:
            probs.append(poisson.cdf(obs, exp))
        else:
            probs.append(1 - poisson.cdf(obs - 1, exp))
    if threshold:
        probs = [p if p < threshold else 0.0 for p in probs]
    return np.array(probs)
def assuncao_rate(e, b):
    """The standardized rates where the mean and standard deviation used for
    the standardization are those of Empirical Bayes rate estimates

    The standardized rates resulting from this function are used to compute
    Moran's I corrected for rate variables.

    Parameters
    ----------
    e : array(n, 1)
        event variable measured at n spatial units
    b : array(n, 1)
        population at risk variable measured at n spatial units

    Notes
    -----
    e and b are arranged in the same order

    Returns
    -------
    : array (nx1)

    Examples
    --------
    >>> e = np.array([30, 25, 25, 15, 33, 21, 30, 20])
    >>> b = np.array([100, 100, 110, 90, 100, 90, 110, 90])
    >>> assuncao_rate(e, b)[:4]
    array([ 1.03843594, -0.04099089, -0.56250375, -1.73061861])
    """
    raw = e * 1.0 / b
    total_e = sum(e)
    total_b = sum(b)
    # Empirical Bayes prior mean and variance of the rates
    prior_mean = total_e * 1.0 / total_b
    weighted_var = sum(b * ((raw - prior_mean) ** 2)) / total_b
    prior_var = weighted_var - prior_mean / (float(total_b) / len(e))
    # per-unit EB variance; small populations get larger variance
    eb_var = prior_var + prior_mean / b
    return (raw - prior_mean) / np.sqrt(eb_var)
class _Smoother(object):
    """
    Shared machinery for the smoother classes in this module.

    The only behavior propagated to subclasses is the ``by_col`` helper;
    each concrete smoother is otherwise a constructor that sets the ``r``
    attribute.
    """
    def __init__(self):
        pass

    @classmethod
    def by_col(cls, df, e, b, inplace=False, **kwargs):
        """
        Compute smoothing by columns in a dataframe.

        Parameters
        -----------
        df : pandas.DataFrame
            a dataframe containing the data to be smoothed
        e : string or list of strings
            the name or names of columns containing event variables to be
            smoothed
        b : string or list of strings
            the name or names of columns containing the population
            variables to be smoothed
        inplace : bool
            a flag denoting whether to output a copy of `df` with the
            relevant smoothed columns appended, or to append the columns
            directly to `df` itself.
        **kwargs : optional keyword arguments
            optional keyword options that are passed directly to the
            smoother.

        Returns
        ---------
        a copy of `df` containing the columns. Or, if `inplace`, this returns
        None, but implicitly adds columns to `df`.
        """
        if not inplace:
            out = df.copy()
            cls.by_col(out, e, b, inplace=True, **kwargs)
            return out
        if isinstance(e, str):
            e = [e]
        if isinstance(b, str):
            b = [b]
        # a single population column is reused for every event column
        if len(b) == 1 and len(e) > 1:
            b = b * len(e)
        try:
            assert len(e) == len(b)
        except AssertionError:
            raise ValueError('There is no one-to-one mapping between event'
                             ' variable and population at risk variable!')
        for ename, bname in zip(e, b):
            events = df[ename]
            pops = df[bname]
            # column name pattern: "<event>-<pop>_<smootherclass>"
            outcol = '_'.join(('-'.join((ename, bname)), cls.__name__.lower()))
            df[outcol] = cls(events, pops, **kwargs).r
class Excess_Risk(_Smoother):
    """Excess Risk

    Parameters
    ----------
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units

    Attributes
    ----------
    r : array (n, 1)
        excess risk values

    Examples
    --------
    >>> import libpysal
    >>> stl = libpysal.io.open(libpysal.examples.get_path('stl_hom.csv'), 'r')
    >>> stl_e, stl_b = np.array(stl[:,10]), np.array(stl[:,13])
    >>> er = Excess_Risk(stl_e, stl_b)
    >>> er.r[:10]
    array([[0.20665681],
           [0.43613787],
           [0.42078261],
           [0.22066928],
           [0.57981596],
           [0.35301709],
           [0.56407549],
           [0.17020994],
           [0.3052372 ],
           [0.25821905]])
    """
    def __init__(self, e, b):
        events = np.asarray(e).reshape(-1, 1)
        pop = np.asarray(b).reshape(-1, 1)
        # study-area-wide rate used as the reference
        overall_rate = events.sum() * 1.0 / pop.sum()
        # ratio of each unit's raw rate to the overall rate
        self.r = events * 1.0 / (pop * overall_rate)
class Empirical_Bayes(_Smoother):
    """Aspatial Empirical Bayes smoothing.

    Crude rates are shrunk toward the global mean rate; the amount of
    shrinkage per unit depends on the estimated prior variance and the
    unit's population at risk (small populations are shrunk more).

    Parameters
    ----------
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units
    Attributes
    ----------
    r : array (n, 1)
        rate values from Empirical Bayes Smoothing
    Examples
    --------
    Reading data in stl_hom.csv into stl to extract values
    for event and population-at-risk variables
    >>> import libpysal
    >>> stl = libpysal.io.open(libpysal.examples.get_path('stl_hom.csv'), 'r')
    The 11th and 14th columns in stl_hom.csv includes the number of homocides and population.
    Creating two arrays from these columns.
    >>> stl_e, stl_b = np.array(stl[:,10]), np.array(stl[:,13])
    Creating an instance of Empirical_Bayes class using stl_e and stl_b
    >>> eb = Empirical_Bayes(stl_e, stl_b)
    Extracting the risk values through the property r of the Empirical_Bayes instance, eb
    >>> eb.r[:10]
    array([[2.36718950e-05],
           [4.54539167e-05],
           [4.78114019e-05],
           [2.76907146e-05],
           [6.58989323e-05],
           [3.66494122e-05],
           [5.79952721e-05],
           [2.03064590e-05],
           [3.31152999e-05],
           [3.02748380e-05]])
    """
    def __init__(self, e, b):
        events = np.asarray(e).reshape(-1, 1)
        pop = np.asarray(b).reshape(-1, 1)
        total_events = events.sum() * 1.0
        total_pop = pop.sum() * 1.0
        global_rate = total_events / total_pop
        crude = events * 1.0 / pop
        deviation = crude - global_rate
        # method-of-moments prior variance estimate: population-weighted
        # variance of the crude rates minus a correction term
        prior_var = (pop * deviation * deviation).sum() * 1.0 / total_pop
        prior_var = prior_var - global_rate * 1.0 / pop.mean()
        # reliability weight in [0, 1); larger populations keep more of
        # their own crude rate
        shrink = prior_var / (prior_var + global_rate / pop)
        self.r = shrink * crude + (1.0 - shrink) * global_rate
class _Spatial_Smoother(_Smoother):
    """
    This is a helper class that implements things that all the things that
    spatial smoothers should do.
    .
    Right now, the only thing that we need to propagate is the by_col function.
    TBQH, most of these smoothers should be functions, not classes (aside from
    maybe headbanging triples), since they're literally only inits + one
    attribute.
    """
    def __init__(self):
        pass

    @classmethod
    def by_col(cls, df, e, b, w=None, inplace=False, **kwargs):
        """
        Compute smoothing by columns in a dataframe.
        Parameters
        -----------
        df : pandas.DataFrame
             a dataframe containing the data to be smoothed
        e : string or list of strings
             the name or names of columns containing event variables to be
             smoothed
        b : string or list of strings
             the name or names of columns containing the population
             variables to be smoothed
        w : pysal.weights.W or list of pysal.weights.W
             the spatial weights object or objects to use with the
             event-population pairs. If not provided and a weights object
             is in the dataframe's metadata, that weights object will be
             used.
        inplace : bool
             a flag denoting whether to output a copy of `df` with the
             relevant smoothed columns appended, or to append the columns
             directly to `df` itself.
        **kwargs: optional keyword arguments
             optional keyword options that are passed directly to the
             smoother.
        Returns
        ---------
        a copy of `df` containing the columns. Or, if `inplace`, this returns
        None, but implicitly adds columns to `df`.
        """
        if not inplace:
            new = df.copy()
            cls.by_col(new, e, b, w=w, inplace=True, **kwargs)
            return new
        if isinstance(e, str):
            e = [e]
        if isinstance(b, str):
            b = [b]
        if w is None:
            found = False
            for k in df._metadata:
                # BUG FIX: look up the metadata attribute name `k`;
                # the original looked up `w`, which is always None here,
                # so an attached weights object was never found.
                w = df.__dict__.get(k, None)
                if isinstance(w, W):
                    found = True
                    # stop at the first weights object so a later metadata
                    # key does not overwrite it with None
                    break
            if not found:
                raise Exception('Weights not provided and no weights attached to frame!'
                                ' Please provide a weight or attach a weight to the'
                                ' dataframe')
        if isinstance(w, W):
            # a single weights object is reused for every event column
            w = [w] * len(e)
        if len(b) == 1 and len(e) > 1:
            # broadcast a single population column across all event columns
            b = b * len(e)
        try:
            assert len(e) == len(b)
        except AssertionError:
            raise ValueError('There is no one-to-one mapping between event'
                             ' variable and population at risk variable!')
        for ei, bi, wi in zip(e, b, w):
            ename = ei
            bname = bi
            ei = df[ename]
            bi = df[bname]
            # output column name, e.g. "homicide-population_spatial_rate"
            outcol = '_'.join(('-'.join((ename, bname)), cls.__name__.lower()))
            df[outcol] = cls(ei, bi, w=wi, **kwargs).r
class Spatial_Empirical_Bayes(_Spatial_Smoother):
    """Spatial Empirical Bayes Smoothing
    Parameters
    ----------
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units
    w : spatial weights instance
    Attributes
    ----------
    r : array (n, 1)
        rate values from Empirical Bayes Smoothing
    Examples
    --------
    Reading data in stl_hom.csv into stl to extract values
    for event and population-at-risk variables
    >>> import libpysal
    >>> stl = libpysal.io.open(libpysal.examples.get_path('stl_hom.csv'), 'r')
    The 11th and 14th columns in stl_hom.csv includes the number of homocides and population.
    Creating two arrays from these columns.
    >>> stl_e, stl_b = np.array(stl[:,10]), np.array(stl[:,13])
    Creating a spatial weights instance by reading in stl.gal file.
    >>> stl_w = libpysal.io.open(libpysal.examples.get_path('stl.gal'), 'r').read()
    Ensuring that the elements in the spatial weights instance are ordered
    by the given sequential numbers from 1 to the number of observations in stl_hom.csv
    >>> if not stl_w.id_order_set: stl_w.id_order = range(1,len(stl) + 1)
    Creating an instance of Spatial_Empirical_Bayes class using stl_e, stl_b, and stl_w
    >>> from esda.smoothing import Spatial_Empirical_Bayes
    >>> s_eb = Spatial_Empirical_Bayes(stl_e, stl_b, stl_w)
    Extracting the risk values through the property r of s_eb
    >>> s_eb.r[:10]
    array([[4.01485749e-05],
           [3.62437513e-05],
           [4.93034844e-05],
           [5.09387329e-05],
           [3.72735210e-05],
           [3.69333797e-05],
           [5.40245456e-05],
           [2.99806055e-05],
           [3.73034109e-05],
           [3.47270722e-05]])
    """
    def __init__(self, e, b, w):
        # Unlike aspatial Empirical_Bayes, each unit's crude rate is
        # shrunk toward its own neighborhood (window) mean rate rather
        # than the global mean.
        if not w.id_order_set:
            raise ValueError("w id_order must be set to align with the order of e an b")
        e = np.asarray(e).reshape(-1,1)
        b = np.asarray(b).reshape(-1,1)
        # prior means: spatially lagged (window) rate for each unit
        r_mean = Spatial_Rate(e, b, w).r
        rate = e * 1.0 / b
        r_var_left = np.ones_like(e) * 1.
        ngh_num = np.ones_like(e)
        # window population: the unit's own population plus its neighbors'
        bi = slag(w, b) + b
        for i, idv in enumerate(w.id_order):
            # the neighborhood includes the focal unit itself
            ngh = list(w[idv].keys()) + [idv]
            nghi = [w.id2i[k] for k in ngh]
            ngh_num[i] = len(nghi)
            # population-weighted squared deviations of neighborhood rates
            # from the window mean rate
            v = sum(np.square(rate[nghi] - r_mean[i]) * b[nghi])
            r_var_left[i] = v
        r_var_left = r_var_left / bi
        r_var_right = r_mean / (bi / ngh_num)
        # prior variance estimate; negative estimates are truncated to 0
        # so the shrinkage weight stays in [0, 1)
        r_var = r_var_left - r_var_right
        r_var[r_var < 0] = 0.0
        # shrink the crude rate toward the window mean by the reliability
        # ratio r_var / (r_var + r_mean / b)
        self.r = r_mean + (rate - r_mean) * (r_var / (r_var + (r_mean / b)))
class Spatial_Rate(_Spatial_Smoother):
    """Spatial rate (window) smoothing.

    Each unit's rate is recomputed over its neighborhood window: the
    sum of events in the unit and its neighbors divided by the summed
    population at risk, using binary weights.

    Parameters
    ----------
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units
    w : spatial weights instance
    Attributes
    ----------
    r : array (n, 1)
        rate values from spatial rate smoothing
    Examples
    --------
    Reading data in stl_hom.csv into stl to extract values
    for event and population-at-risk variables
    >>> import libpysal
    >>> stl = libpysal.io.open(libpysal.examples.get_path('stl_hom.csv'), 'r')
    The 11th and 14th columns in stl_hom.csv includes the number of homocides and population.
    Creating two arrays from these columns.
    >>> stl_e, stl_b = np.array(stl[:,10]), np.array(stl[:,13])
    Creating a spatial weights instance by reading in stl.gal file.
    >>> stl_w = libpysal.io.open(libpysal.examples.get_path('stl.gal'), 'r').read()
    Ensuring that the elements in the spatial weights instance are ordered
    by the given sequential numbers from 1 to the number of observations in stl_hom.csv
    >>> if not stl_w.id_order_set: stl_w.id_order = range(1,len(stl) + 1)
    Creating an instance of Spatial_Rate class using stl_e, stl_b, and stl_w
    >>> from esda.smoothing import Spatial_Rate
    >>> sr = Spatial_Rate(stl_e,stl_b,stl_w)
    Extracting the risk values through the property r of sr
    >>> sr.r[:10]
    array([[4.59326407e-05],
           [3.62437513e-05],
           [4.98677081e-05],
           [5.09387329e-05],
           [3.72735210e-05],
           [4.01073093e-05],
           [3.79372794e-05],
           [3.27019246e-05],
           [4.26204928e-05],
           [3.47270722e-05]])
    """
    def __init__(self, e, b, w):
        if not w.id_order_set:
            raise ValueError("w id_order must be set to align with the order of e and b")
        events = np.asarray(e).reshape(-1, 1)
        pop = np.asarray(b).reshape(-1, 1)
        # binary transform so the lag is a plain neighborhood sum
        w.transform = 'b'
        lagged_events = slag(w, events)
        lagged_pop = slag(w, pop)
        self.r = (events + lagged_events) / (pop + lagged_pop)
        # restore the original transform
        w.transform = 'o'
class Kernel_Smoother(_Spatial_Smoother):
    """Kernal smoothing
    Parameters
    ----------
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units
    w : Kernel weights instance
    Attributes
    ----------
    r : array (n, 1)
        rate values from spatial rate smoothing
    Examples
    --------
    Creating an array including event values for 6 regions
    >>> e = np.array([10, 1, 3, 4, 2, 5])
    Creating another array including population-at-risk values for the 6 regions
    >>> b = np.array([100, 15, 20, 20, 80, 90])
    Creating a list containing geographic coordinates of the 6 regions' centroids
    >>> points=[(10, 10), (20, 10), (40, 10), (15, 20), (30, 20), (30, 30)]
    Creating a kernel-based spatial weights instance by using the above points
    >>> kw=Kernel(points)
    Ensuring that the elements in the kernel-based weights are ordered
    by the given sequential numbers from 0 to 5
    >>> if not kw.id_order_set: kw.id_order = range(0,len(points))
    Applying kernel smoothing to e and b
    >>> kr = Kernel_Smoother(e, b, kw)
    Extracting the smoothed rates through the property r of the Kernel_Smoother instance
    >>> kr.r
    array([[0.10543301],
           [0.0858573 ],
           [0.08256196],
           [0.09884584],
           [0.04756872],
           [0.04845298]])
    """
    def __init__(self, e, b, w):
        # BUG FIX: the original raised the undefined name ``Error`` here,
        # which surfaced as a NameError instead of a meaningful exception.
        # ``isinstance`` also accepts Kernel subclasses, unlike the
        # original ``type(w) != Kernel`` check.
        if not isinstance(w, Kernel):
            raise ValueError('w must be an instance of Kernel weights')
        if not w.id_order_set:
            raise ValueError("w id_order must be set to align with the order of e and b")
        else:
            e = np.asarray(e).reshape(-1, 1)
            b = np.asarray(b).reshape(-1, 1)
            # kernel-weighted sums of events and populations
            w_e, w_b = slag(w, e), slag(w, b)
            self.r = w_e / w_b
class Age_Adjusted_Smoother(_Spatial_Smoother):
    """Age-adjusted rate smoothing
    Parameters
    ----------
    e : array (n*h, 1)
        event variable measured for each age group across n spatial units
    b : array (n*h, 1)
        population at risk variable measured for each age group across n spatial units
    w : spatial weights instance
    s : array (n*h, 1)
        standard population for each age group across n spatial units
    Attributes
    ----------
    r : array (n, 1)
        rate values from spatial rate smoothing
    Notes
    -----
    Weights used to smooth age-specific events and populations are simple binary weights
    Examples
    --------
    Creating an array including 12 values for the 6 regions with 2 age groups
    >>> e = np.array([10, 8, 1, 4, 3, 5, 4, 3, 2, 1, 5, 3])
    Creating another array including 12 population-at-risk values for the 6 regions
    >>> b = np.array([100, 90, 15, 30, 25, 20, 30, 20, 80, 80, 90, 60])
    For age adjustment, we need another array of values containing standard population
    s includes standard population data for the 6 regions
    >>> s = np.array([98, 88, 15, 29, 20, 23, 33, 25, 76, 80, 89, 66])
    Creating a list containing geographic coordinates of the 6 regions' centroids
    >>> points=[(10, 10), (20, 10), (40, 10), (15, 20), (30, 20), (30, 30)]
    Creating a kernel-based spatial weights instance by using the above points
    >>> kw=Kernel(points)
    Ensuring that the elements in the kernel-based weights are ordered
    by the given sequential numbers from 0 to 5
    >>> if not kw.id_order_set: kw.id_order = range(0,len(points))
    Applying age-adjusted smoothing to e and b
    >>> ar = Age_Adjusted_Smoother(e, b, kw, s)
    Extracting the smoothed rates through the property r of the Age_Adjusted_Smoother instance
    >>> ar.r
    array([0.10519625, 0.08494318, 0.06440072, 0.06898604, 0.06952076,
           0.05020968])
    """
    def __init__(self, e, b, w, s, alpha=0.05):
        e = np.asarray(e).reshape(-1, 1)
        b = np.asarray(b).reshape(-1, 1)
        s = np.asarray(s).flatten()
        t = len(e)
        h = t // w.n  # number of age groups per spatial unit
        w.transform = 'b'
        e_n, b_n = [], []
        # spatially lag each age group separately with binary weights
        for i in range(h):
            e_n.append(slag(w, e[i::h]).tolist())
            b_n.append(slag(w, b[i::h]).tolist())
        # interleave the lagged age groups back into unit-major order
        e_n = np.array(e_n).reshape((1, t), order='F')[0]
        b_n = np.array(b_n).reshape((1, t), order='F')[0]
        e_n = e_n.reshape(s.shape)
        b_n = b_n.reshape(s.shape)
        r = direct_age_standardization(e_n, b_n, s, w.n, alpha=alpha)
        # keep only the point estimate, dropping the confidence interval
        self.r = np.array([i[0] for i in r])
        w.transform = 'o'

    @_requires('pandas')
    @classmethod
    def by_col(cls, df, e, b, w=None, s=None, **kwargs):
        """
        Compute smoothing by columns in a dataframe.
        Parameters
        -----------
        df : pandas.DataFrame
             a dataframe containing the data to be smoothed
        e : string or list of strings
             the name or names of columns containing event variables to be
             smoothed
        b : string or list of strings
             the name or names of columns containing the population
             variables to be smoothed
        w : pysal.weights.W or list of pysal.weights.W
             the spatial weights object or objects to use with the
             event-population pairs. If not provided and a weights object
             is in the dataframe's metadata, that weights object will be
             used.
        s : string or list of strings
             the name or names of columns to use as a standard population
             variable for the events `e` and at-risk populations `b`.
        inplace : bool
             a flag denoting whether to output a copy of `df` with the
             relevant smoothed columns appended, or to append the columns
             directly to `df` itself.
        **kwargs: optional keyword arguments
             optional keyword options that are passed directly to the
             smoother.
        Returns
        ---------
        a copy of `df` containing the columns. Or, if `inplace`, this returns
        None, but implicitly adds columns to `df`.
        """
        if s is None:
            raise Exception('Standard population variable "s" must be supplied.')
        import pandas as pd
        if isinstance(e, str):
            e = [e]
        if isinstance(b, str):
            b = [b]
        if isinstance(s, str):
            s = [s]
        if w is None:
            found = False
            for k in df._metadata:
                # BUG FIX: look up the metadata attribute name `k`;
                # the original looked up `w`, which is always None here.
                w = df.__dict__.get(k, None)
                if isinstance(w, W):
                    found = True
                    break
            if not found:
                raise Exception('Weights not provided and no weights attached to frame!'
                                ' Please provide a weight or attach a weight to the'
                                ' dataframe.')
        if isinstance(w, W):
            w = [w] * len(e)
        if not all(isinstance(wi, W) for wi in w):
            raise Exception('Weights object must be an instance of '
                            ' libpysal.weights.W!')
        # broadcast single population / standard-population columns
        b = b * len(e) if len(b) == 1 and len(e) > 1 else b
        s = s * len(e) if len(s) == 1 and len(e) > 1 else s
        try:
            assert len(e) == len(b)
            assert len(e) == len(s)
            assert len(e) == len(w)
        except AssertionError:
            raise ValueError('There is no one-to-one mapping between event'
                             ' variable and population at risk variable, and '
                             ' standard population variable, and spatial '
                             ' weights!')
        rdf = []
        max_len = 0
        for ei, bi, wi, si in zip(e, b, w, s):
            ename = ei
            bname = bi
            outcol = '_'.join(('-'.join((ename, bname)), cls.__name__.lower()))
            this_r = cls(df[ei], df[bi], w=wi, s=df[si], **kwargs).r
            # BUG FIX: track the length of the longest result; the original
            # `max_len = 0 if len(this_r) > max_len else max_len` left
            # max_len permanently at 0.
            max_len = max(max_len, len(this_r))
            rdf.append((outcol, this_r.tolist()))
        # BUG FIX: pad each column up to max_len (the original appended
        # max_len Nones to every column, leaving them unequal in length)
        padded = ((name, col + [None] * (max_len - len(col)))
                  for name, col in rdf)
        # pd.DataFrame.from_items was removed in pandas 1.0; a dict of
        # column lists preserves insertion order on Python 3.7+
        rdf = pd.DataFrame(dict(padded))
        return rdf
class Disk_Smoother(_Spatial_Smoother):
    """Locally weighted averages (disk smoothing).

    Each unit's smoothed value is the weighted average of the crude
    rates over its neighborhood, normalized by the sum of the weights.

    Parameters
    ----------
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units
    w : spatial weights matrix
    Attributes
    ----------
    r : array (n, 1)
        rate values from disk smoothing
    Examples
    --------
    Reading data in stl_hom.csv into stl to extract values
    for event and population-at-risk variables
    >>> import libpysal
    >>> stl = libpysal.io.open(libpysal.examples.get_path('stl_hom.csv'), 'r')
    The 11th and 14th columns in stl_hom.csv includes the number of homocides and population.
    Creating two arrays from these columns.
    >>> stl_e, stl_b = np.array(stl[:,10]), np.array(stl[:,13])
    Creating a spatial weights instance by reading in stl.gal file.
    >>> stl_w = libpysal.io.open(libpysal.examples.get_path('stl.gal'), 'r').read()
    Ensuring that the elements in the spatial weights instance are ordered
    by the given sequential numbers from 1 to the number of observations in stl_hom.csv
    >>> if not stl_w.id_order_set: stl_w.id_order = range(1,len(stl) + 1)
    Applying disk smoothing to stl_e and stl_b
    >>> sr = Disk_Smoother(stl_e,stl_b,stl_w)
    Extracting the risk values through the property r of s_eb
    >>> sr.r[:10]
    array([[4.56502262e-05],
           [3.44027685e-05],
           [3.38280487e-05],
           [4.78530468e-05],
           [3.12278573e-05],
           [2.22596997e-05],
           [2.67074856e-05],
           [2.36924573e-05],
           [3.48801587e-05],
           [3.09511832e-05]])
    """
    def __init__(self, e, b, w):
        if not w.id_order_set:
            raise ValueError("w id_order must be set to align with the order of e and b")
        events = np.asarray(e).reshape(-1, 1)
        pop = np.asarray(b).reshape(-1, 1)
        crude = events * 1.0 / pop
        # total weight in each unit's neighborhood, in id_order
        weight_totals = np.array(
            [sum(w.weights[idx]) for idx in w.id_order]
        ).reshape(-1, 1)
        self.r = slag(w, crude) / weight_totals
class Spatial_Median_Rate(_Spatial_Smoother):
    """Spatial median rate smoothing.

    Each unit's rate is replaced by the (optionally weighted) median of
    the rates in its neighborhood, including the unit itself; the pass
    can be repeated ``iteration`` times.

    Parameters
    ----------
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units
    w : spatial weights instance
    aw : array (n, 1)
        auxiliary weight variable measured across n spatial units
    iteration : integer
        the number of interations
    Attributes
    ----------
    r : array (n, 1)
        rate values from spatial median rate smoothing
    w : spatial weights instance
    aw : array (n, 1)
        auxiliary weight variable measured across n spatial units
    Examples
    --------
    Reading data in stl_hom.csv into stl to extract values
    for event and population-at-risk variables
    >>> import libpysal
    >>> stl = libpysal.io.open(libpysal.examples.get_path('stl_hom.csv'), 'r')
    The 11th and 14th columns in stl_hom.csv includes the number of homocides and population.
    Creating two arrays from these columns.
    >>> stl_e, stl_b = np.array(stl[:,10]), np.array(stl[:,13])
    Creating a spatial weights instance by reading in stl.gal file.
    >>> stl_w = libpysal.io.open(libpysal.examples.get_path('stl.gal'), 'r').read()
    Ensuring that the elements in the spatial weights instance are ordered
    by the given sequential numbers from 1 to the number of observations in stl_hom.csv
    >>> if not stl_w.id_order_set: stl_w.id_order = range(1,len(stl) + 1)
    Computing spatial median rates without iteration
    >>> smr0 = Spatial_Median_Rate(stl_e,stl_b,stl_w)
    >>> smr0.r[:10]
    array([3.96047383e-05, 3.55386859e-05, 3.28308921e-05, 4.30731238e-05,
           3.12453969e-05, 1.97300409e-05, 3.10159267e-05, 2.19279204e-05,
           2.93763432e-05, 2.93763432e-05])
    Recomputing spatial median rates with 5 iterations
    >>> smr1 = Spatial_Median_Rate(stl_e,stl_b,stl_w,iteration=5)
    >>> smr1.r[:10]
    array([3.11293620e-05, 2.95956330e-05, 3.11293620e-05, 3.10159267e-05,
           2.98436066e-05, 2.76406686e-05, 3.10159267e-05, 2.94788171e-05,
           2.99460806e-05, 2.96981070e-05])
    Computing spatial median rates by using the base variable as auxilliary weights
    without iteration
    >>> smr2 = Spatial_Median_Rate(stl_e,stl_b,stl_w,aw=stl_b)
    >>> smr2.r[:10]
    array([5.77412020e-05, 4.46449551e-05, 5.77412020e-05, 5.77412020e-05,
           4.46449551e-05, 3.61363528e-05, 3.61363528e-05, 4.46449551e-05,
           5.77412020e-05, 4.03987355e-05])
    Recomputing spatial median rates by using the base variable as auxilliary weights
    with 5 iterations
    >>> smr3 = Spatial_Median_Rate(stl_e,stl_b,stl_w,aw=stl_b,iteration=5)
    >>> smr3.r[:10]
    array([3.61363528e-05, 4.46449551e-05, 3.61363528e-05, 3.61363528e-05,
           4.46449551e-05, 3.61363528e-05, 3.61363528e-05, 4.46449551e-05,
           3.61363528e-05, 4.46449551e-05])
    >>>
    """
    def __init__(self, e, b, w, aw=None, iteration=1):
        if not w.id_order_set:
            raise ValueError("w id_order must be set to align with the order of e and b")
        events = np.asarray(e).flatten()
        pop = np.asarray(b).flatten()
        self.r = events * 1.0 / pop
        self.aw, self.w = aw, w
        for _ in range(iteration):
            self.__search_median()

    def __search_median(self):
        """One smoothing pass: replace each rate by its neighborhood median."""
        rates, aux, w = self.r, self.aw, self.w
        smoothed = []
        if aux is None:
            # plain median over the unit and its neighbors
            for pos, key in enumerate(w.id_order):
                disk = np.append(rates[pos], rates[w.neighbor_offsets[key]])
                smoothed.append(np.median(disk))
        else:
            # median weighted by the auxiliary variable
            for pos, key in enumerate(w.id_order):
                idx = [pos] + list(w.neighbor_offsets[key])
                smoothed.append(weighted_median(rates[idx], aux[idx]))
        self.r = np.asarray(smoothed).reshape(rates.shape)
class Spatial_Filtering(_Smoother):
    """Spatial Filtering
    Parameters
    ----------
    bbox : a list of two lists where each list is a pair of coordinates
           a bounding box for the entire n spatial units
    data : array (n, 2)
           x, y coordinates
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units
    x_grid : integer
             the number of cells on x axis
    y_grid : integer
             the number of cells on y axis
    r : float
        fixed radius of a moving window
    pop : integer
          population threshold to create adaptive moving windows
    Attributes
    ----------
    grid : array (x_grid*y_grid, 2)
           x, y coordinates for grid points
    r : array (x_grid*y_grid, 1)
        rate values for grid points
    Notes
    -----
    No tool is provided to find an optimal value for r or pop.
    Examples
    --------
    Reading data in stl_hom.csv into stl to extract values
    for event and population-at-risk variables
    >>> import libpysal
    >>> stl = libpysal.io.open(libpysal.examples.get_path('stl_hom.csv'), 'r')
    Reading the stl data in the WKT format so that
    we can easily extract polygon centroids
    >>> from libpysal.io.util.wkt import WKTParser
    >>> fromWKT = WKTParser()
    >>> stl.cast('WKT',fromWKT)
    Extracting polygon centroids through iteration
    >>> d = np.array([i.centroid for i in stl[:,0]])
    Specifying the bounding box for the stl_hom data.
    The bbox should includes two points for the left-bottom and the right-top corners
    >>> bbox = [[-92.700676, 36.881809], [-87.916573, 40.3295669]]
    The 11th and 14th columns in stl_hom.csv includes the number of homocides and population.
    Creating two arrays from these columns.
    >>> stl_e, stl_b = np.array(stl[:,10]), np.array(stl[:,13])
    Applying spatial filtering by using a 10*10 mesh grid and a moving window
    with 2 radius
    >>> sf_0 = Spatial_Filtering(bbox,d,stl_e,stl_b,10,10,r=2)
    Extracting the resulting rates through the property r of the Spatial_Filtering instance
    >>> sf_0.r[:10]
    array([4.23561763e-05, 4.45290850e-05, 4.56456221e-05, 4.49133384e-05,
           4.39671835e-05, 4.44903042e-05, 4.19845497e-05, 4.11936548e-05,
           3.93463504e-05, 4.04376345e-05])
    Applying another spatial filtering by allowing the moving window to grow until
    600000 people are found in the window
    >>> sf = Spatial_Filtering(bbox,d,stl_e,stl_b,10,10,pop=600000)
    Checking the size of the reulting array including the rates
    >>> sf.r.shape
    (100,)
    Extracting the resulting rates through the property r of the Spatial_Filtering instance
    >>> sf.r[:10]
    array([3.73728738e-05, 4.04456300e-05, 4.04456300e-05, 3.81035327e-05,
           4.54831940e-05, 4.54831940e-05, 3.75658628e-05, 3.75658628e-05,
           3.75658628e-05, 3.75658628e-05])
    """
    def __init__(self, bbox, data, e, b, x_grid, y_grid, r=None, pop=None):
        e = np.asarray(e).reshape(-1, 1)
        b = np.asarray(b).reshape(-1, 1)
        data_tree = KDTree(data)
        x_range = bbox[1][0] - bbox[0][0]
        y_range = bbox[1][1] - bbox[0][1]
        # regular mesh of grid-cell coordinates over the bounding box
        x, y = np.mgrid[bbox[0][0]:bbox[1][0]:float(x_range) / x_grid,
                        bbox[0][1]:bbox[1][1]:float(y_range) / y_grid]
        self.grid = list(zip(x.ravel(), y.ravel()))
        self.r = []
        if r is None and pop is None:
            raise ValueError("Either r or pop should not be None")
        if r is not None:
            # fixed-radius windows: aggregate all points within r of each
            # grid cell
            pnts_in_disk = data_tree.query_ball_point(self.grid, r=r)
            for i in pnts_in_disk:
                # use a distinct local name to avoid rebinding the ``r``
                # parameter (the original shadowed it inside this loop)
                rate = e[i].sum() * 1.0 / b[i].sum()
                self.r.append(rate)
        if pop is not None:
            # adaptive windows: all points ordered by distance from each
            # grid cell, grown until the population threshold is reached
            nearest_pnts = data_tree.query(self.grid, k=len(e))[1]
            for i in nearest_pnts:
                e_n, b_n = e[i].cumsum(), b[i].cumsum()
                b_n_filter = b_n <= pop
                e_n_f, b_n_f = e_n[b_n_filter], b_n[b_n_filter]
                if len(e_n_f) == 0:
                    # the nearest point alone exceeds the threshold;
                    # fall back to using just that point
                    e_n_f = e_n[[0]]
                    b_n_f = b_n[[0]]
                self.r.append(e_n_f[-1] * 1.0 / b_n_f[-1])
        self.r = np.array(self.r)

    @_requires('pandas')
    @classmethod
    def by_col(cls, df, e, b, x_grid, y_grid, geom_col='geometry', **kwargs):
        """
        Compute smoothing by columns in a dataframe. The bounding box and point
        information is computed from the geometry column.
        Parameters
        -----------
        df : pandas.DataFrame
             a dataframe containing the data to be smoothed
        e : string or list of strings
             the name or names of columns containing event variables to be
             smoothed
        b : string or list of strings
             the name or names of columns containing the population
             variables to be smoothed
        x_grid : integer
             number of grid cells to use along the x-axis
        y_grid : integer
             number of grid cells to use along the y-axis
        geom_col: string
             the name of the column in the dataframe containing the
             geometry information.
        **kwargs: optional keyword arguments
             optional keyword options that are passed directly to the
             smoother.
        Returns
        ---------
        a new dataframe of dimension (x_grid*y_grid, 3), containing the
        coordinates of the grid cells and the rates associated with those grid
        cells.
        """
        import pandas as pd
        # prep for application over multiple event/population pairs
        if isinstance(e, str):
            e = [e]
        if isinstance(b, str):
            b = [b]
        if len(e) > len(b):
            b = b * len(e)
        if isinstance(x_grid, (int, float)):
            x_grid = [x_grid] * len(e)
        if isinstance(y_grid, (int, float)):
            y_grid = [y_grid] * len(e)
        bbox = get_bounding_box(df[geom_col])
        bbox = [[bbox.left, bbox.lower], [bbox.right, bbox.upper]]
        data = get_points_array(df[geom_col])
        res = []
        for ename, bname, xgi, ygi in zip(e, b, x_grid, y_grid):
            r = cls(bbox, data, df[ename], df[bname], xgi, ygi, **kwargs)
            grid = np.asarray(r.grid).reshape(-1, 2)
            name = '_'.join(('-'.join((ename, bname)), cls.__name__.lower()))
            colnames = ('_'.join((name, suffix)) for suffix in ['X', 'Y', 'R'])
            items = [(name, col) for name, col in zip(colnames, [grid[:, 0],
                                                                 grid[:, 1],
                                                                 r.r])]
            # BUG FIX: pd.DataFrame.from_items was removed in pandas 1.0;
            # a dict preserves the X, Y, R column order on Python 3.7+
            res.append(pd.DataFrame(dict(items)))
        outdf = pd.concat(res)
        return outdf
class Headbanging_Triples(object):
    """Generate a pseudo spatial weights instance that contains headbanging triples
    Parameters
    ----------
    data : array (n, 2)
           numpy array of x, y coordinates
    w : spatial weights instance
    k : integer number of nearest neighbors
    t : integer
        the number of triples
    angle : integer between 0 and 180
            the angle criterium for a set of triples
    edgecorr : boolean
               whether or not correction for edge points is made
    Attributes
    ----------
    triples : dictionary
              key is observation record id, value is a list of lists of triple ids
    extra : dictionary
            key is observation record id, value is a list of the following:
            tuple of original triple observations
            distance between original triple observations
            distance between an original triple observation and its extrapolated point
    Examples
    --------
    importing k-nearest neighbor weights creator
    >>> import libpysal # doctest: +SKIP
    Reading data in stl_hom.csv into stl_db to extract values
    for event and population-at-risk variables
    >>> stl_db = libpysal.io.open(libpysal.examples.get_path('stl_hom.csv'),'r') # doctest: +SKIP
    Reading the stl data in the WKT format so that
    we can easily extract polygon centroids
    >>> from libpysal.io.util.wkt import WKTParser # doctest: +SKIP
    >>> fromWKT = WKTParser() # doctest: +SKIP
    >>> stl_db.cast('WKT',fromWKT) # doctest: +SKIP
    Extracting polygon centroids through iteration
    >>> d = np.array([i.centroid for i in stl_db[:,0]]) # doctest: +SKIP
    Using the centroids, we create a 5-nearst neighbor weights
    >>> w = libpysal.weights.KNN(d,k=5) # doctest: +SKIP
    Ensuring that the elements in the spatial weights instance are ordered
    by the order of stl_db's IDs
    >>> if not w.id_order_set: w.id_order = w.id_order # doctest: +SKIP
    Finding headbaning triples by using 5 nearest neighbors
    >>> ht = Headbanging_Triples(d,w,k=5) # doctest: +SKIP
    Checking the members of triples
    >>> for k, item in ht.triples.items()[:5]: print(k, item) # doctest: +SKIP
    0 [(5, 6), (10, 6)]
    1 [(4, 7), (4, 14), (9, 7)]
    2 [(0, 8), (10, 3), (0, 6)]
    3 [(4, 2), (2, 12), (8, 4)]
    4 [(8, 1), (12, 1), (8, 9)]
    Opening sids2.shp file
    >>> import libpysal
    >>> sids = libpysal.io.open(libpysal.examples.get_path('sids2.shp'),'r') # doctest: +SKIP
    Extracting the centroids of polygons in the sids data
    >>> sids_d = np.array([i.centroid for i in sids]) # doctest: +SKIP
    Creating a 5-nearest neighbors weights from the sids centroids
    >>> sids_w = libpysal.weights.KNN(sids_d,k=5) # doctest: +SKIP
    Ensuring that the members in sids_w are ordered by
    the order of sids_d's ID
    >>> if not sids_w.id_order_set: sids_w.id_order = sids_w.id_order # doctest: +SKIP
    Finding headbaning triples by using 5 nearest neighbors
    >>> s_ht = Headbanging_Triples(sids_d,sids_w,k=5) # doctest: +SKIP
    Checking the members of the found triples
    >>> for k, item in s_ht.triples.items()[:5]: print(k, item) # doctest: +SKIP
    0 [(1, 18), (1, 21), (1, 33)]
    1 [(2, 40), (2, 22), (22, 40)]
    2 [(39, 22), (1, 9), (39, 17)]
    3 [(16, 6), (19, 6), (20, 6)]
    4 [(5, 15), (27, 15), (35, 15)]
    Finding headbanging triples by using 5 nearest neighbors with edge correction
    >>> s_ht2 = Headbanging_Triples(sids_d,sids_w,k=5,edgecor=True) # doctest: +SKIP
    Checking the members of the found triples
    >>> for k, item in s_ht2.triples.items()[:5]: print(k, item) # doctest: +SKIP
    0 [(1, 18), (1, 21), (1, 33)]
    1 [(2, 40), (2, 22), (22, 40)]
    2 [(39, 22), (1, 9), (39, 17)]
    3 [(16, 6), (19, 6), (20, 6)]
    4 [(5, 15), (27, 15), (35, 15)]
    Checking the extrapolated point that is introduced into the triples
    during edge correction
    >>> extrapolated = s_ht2.extra[72] # doctest: +SKIP
    Checking the observation IDs constituting the extrapolated triple
    >>> extrapolated[0] # doctest: +SKIP
    (89, 77)
    Checking the distances between the extraploated point and the observation 89 and 77
    >>> round(extrapolated[1],5), round(extrapolated[2],6) # doctest: +SKIP
    (0.33753, 0.302707)
    """
    def __init__(self, data, w, k=5, t=3, angle=135.0, edgecor=False):
        # NOTE(review): this class is deprecated — the raise below fires
        # unconditionally, so everything after it is unreachable and is
        # retained only for reference.
        raise DeprecationWarning('Deprecated')
        if k < 3:
            raise ValueError("w should be NeareastNeighbors instance & the number of neighbors should be more than 3.")
        if not w.id_order_set:
            raise ValueError("w id_order must be set to align with the order of data")
        self.triples, points = {}, {}
        # map (index, Point) -> {neighbor index: neighbor Point}
        for i, pnt in enumerate(data):
            ng = w.neighbor_offsets[i]
            points[(i, Point(pnt))] = dict(list(zip(ng, [Point(d)
                                                         for d in data[ng]])))
        for i, pnt in list(points.keys()):
            ng = points[(i, pnt)]
            tr, tr_dis = {}, []
            # consider every pair of neighbors as a candidate triple and
            # keep those whose angle at the focal point exceeds the criterium
            for c in comb(list(ng.keys()), 2):
                p2, p3 = ng[c[0]], ng[c[-1]]
                ang = get_angle_between(Ray(pnt, p2), Ray(pnt, p3))
                if ang > angle or (ang < 0.0 and ang + 360 > angle):
                    tr[tuple(c)] = (p2, p3)
            if len(tr) > t:
                # too many candidates: keep the t triples whose connecting
                # segment passes closest to the focal point
                for c in list(tr.keys()):
                    p2, p3 = tr[c]
                    tr_dis.append((get_segment_point_dist(
                        LineSegment(p2, p3), pnt), c))
                tr_dis = sorted(tr_dis)[:t]
                self.triples[i] = [trp for dis, trp in tr_dis]
            else:
                self.triples[i] = list(tr.keys())
        if edgecor:
            # edge correction: points on the convex hull with no triples
            # get an extrapolated triple
            self.extra = {}
            ps = dict([(p, i) for i, p in list(points.keys())])
            chull = convex_hull(list(ps.keys()))
            chull = [p for p in chull if len(self.triples[ps[p]]) == 0]
            for point in chull:
                key = (ps[point], point)
                ng = points[key]
                # neighbors sorted by increasing distance (popped nearest-first)
                ng_dist = [(get_points_dist(point, p), p) for p in list(ng.values())]
                ng_dist_s = sorted(ng_dist, reverse=True)
                extra = None
                while extra is None and len(ng_dist_s) > 0:
                    p2 = ng_dist_s.pop()[-1]
                    p3s = list(ng.values())
                    p3s.remove(p2)
                    for p3 in p3s:
                        dist_p2_p3 = get_points_dist(p2, p3)
                        dist_p_p2 = get_points_dist(point, p2)
                        dist_p_p3 = get_points_dist(point, p3)
                        # anchor the rays at whichever of p2/p3 is closer
                        # to the focal point
                        if dist_p_p2 <= dist_p_p3:
                            ray1, ray2, s_pnt, dist, c = Ray(p2, point), Ray(p2, p3), p2, dist_p_p2, (ps[p2], ps[p3])
                        else:
                            ray1, ray2, s_pnt, dist, c = Ray(p3, point), Ray(p3, p2), p3, dist_p_p3, (ps[p3], ps[p2])
                        ang = get_angle_between(ray1, ray2)
                        if ang >= 90 + angle / 2 or (ang < 0 and ang + 360 >= 90 + angle / 2):
                            # extrapolate a pseudo-point to complete the triple
                            ex_point = get_point_at_angle_and_dist(
                                ray1, angle, dist)
                            extra = [c, dist_p2_p3, get_points_dist(
                                s_pnt, ex_point)]
                            break
                self.triples[ps[point]].append(extra[0])
                self.extra[ps[point]] = extra
class Headbanging_Median_Rate(object):
    """Headbanging Median Rate Smoothing

    Parameters
    ----------
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units
    t : Headbanging_Triples instance
    aw : array (n, 1)
        auxilliary weight variable measured across n spatial units
    iteration : integer
        the number of iterations

    Attributes
    ----------
    r : array (n, 1)
        rate values from headbanging median smoothing

    Examples
    --------
    >>> import libpysal # doctest: +SKIP

    opening the sids2 shapefile

    >>> sids = libpysal.io.open(libpysal.examples.get_path('sids2.shp'), 'r') # doctest: +SKIP

    extracting the centroids of polygons in the sids2 data

    >>> sids_d = np.array([i.centroid for i in sids]) # doctest: +SKIP

    creating a 5-nearest neighbors weights from the centroids

    >>> sids_w = libpysal.weights.KNN(sids_d,k=5) # doctest: +SKIP

    ensuring that the members in sids_w are ordered

    >>> if not sids_w.id_order_set: sids_w.id_order = sids_w.id_order # doctest: +SKIP

    finding headbanging triples by using 5 neighbors

    >>> s_ht = Headbanging_Triples(sids_d,sids_w,k=5) # doctest: +SKIP
    DeprecationWarning: Deprecated

    reading in the sids2 data table

    >>> sids_db = libpysal.io.open(libpysal.examples.get_path('sids2.dbf'), 'r') # doctest: +SKIP

    extracting the 10th and 9th columns in the sids2.dbf and
    using data values as event and population-at-risk variables

    >>> s_e, s_b = np.array(sids_db[:,9]), np.array(sids_db[:,8]) # doctest: +SKIP

    computing headbanging median rates from s_e, s_b, and s_ht

    >>> sids_hb_r = Headbanging_Median_Rate(s_e,s_b,s_ht) # doctest: +SKIP

    extracting the computed rates through the property r of the Headbanging_Median_Rate instance

    >>> sids_hb_r.r[:5] # doctest: +SKIP
    array([ 0.00075586, 0.        , 0.0008285 , 0.0018315 , 0.00498891])

    recomputing headbanging median rates with 5 iterations

    >>> sids_hb_r2 = Headbanging_Median_Rate(s_e,s_b,s_ht,iteration=5) # doctest: +SKIP
    >>> sids_hb_r2.r[:5] # doctest: +SKIP
    array([ 0.0008285 , 0.00084331, 0.00086896, 0.0018315 , 0.00498891])

    recomputing headbanging median rates by considring a set of auxilliary weights

    >>> sids_hb_r3 = Headbanging_Median_Rate(s_e,s_b,s_ht,aw=s_b) # doctest: +SKIP
    >>> sids_hb_r3.r[:5] # doctest: +SKIP
    array([ 0.00091659, 0.        , 0.00156838, 0.0018315 , 0.00498891])
    """

    def __init__(self, e, b, t, aw=None, iteration=1):
        # This smoother is deprecated upstream; constructing it always raises.
        raise DeprecationWarning('Deprecated')
        # NOTE(review): everything below is unreachable dead code, retained
        # to document the original algorithm.
        self.r = e * 1.0 / b
        self.tr, self.aw = t.triples, aw
        if hasattr(t, 'extra'):
            self.extra = t.extra
        while iteration:
            self.__search_headbanging_median()
            iteration -= 1

    def __get_screens(self, id, triples, weighted=False):
        """Return screening statistics for spatial unit ``id``.

        Returns either the bare rate (no triples), an unweighted
        (rate, low, high) tuple, or a weighted 5-tuple
        (rate, low, high, own weight, triples' weight sum).
        """
        r, tr = self.r, self.tr
        # isolated unit: nothing to screen against, return the raw rate
        if len(triples) == 0:
            return r[id]
        # edge-corrected unit: a single triple with an extrapolated endpoint
        if hasattr(self, 'extra') and id in self.extra:
            extra = self.extra
            trp_r = r[list(triples[0])]
            # observed rate plus the difference in rate scaled by the ratio
            # of extrapolated distance to observed distance
            trp_r[-1] = trp_r[0] + (trp_r[0] - trp_r[-1]) * (
                extra[id][-1] * 1.0 / extra[id][1])
            trp_r = sorted(trp_r)
            if not weighted:
                return r[id], trp_r[0], trp_r[-1]
            # BUG FIX: index with a list -- a plain tuple index on a 1-D
            # array is treated by numpy as a multi-dimensional index and
            # raises IndexError.
            trp_aw = self.aw[list(triples[0])]
            extra_w = trp_aw[0] + (trp_aw[0] - trp_aw[-1]) * (
                extra[id][-1] * 1.0 / extra[id][1])
            return r[id], trp_r[0], trp_r[-1], self.aw[id], trp_aw[0] + extra_w
        if not weighted:
            # per-triple extremes, summarized by their medians
            lowest, highest = [], []
            for trp in triples:
                trp_r = np.sort(r[list(trp)])
                lowest.append(trp_r[0])
                highest.append(trp_r[-1])
            return r[id], np.median(np.array(lowest)), np.median(np.array(highest))
        # weighted screening: carry the auxiliary weight of each triple's
        # extreme members along with its rate
        lowest, highest = [], []
        lowest_aw, highest_aw = [], []
        for trp in triples:
            trp_r = r[list(trp)]
            dtype = [('r', '%s' % trp_r.dtype), ('w', '%s' % self.aw.dtype)]
            trp_r = np.array(list(zip(trp_r, list(trp))), dtype=dtype)
            trp_r.sort(order='r')
            lowest.append(trp_r['r'][0])
            highest.append(trp_r['r'][-1])
            lowest_aw.append(self.aw[int(round(trp_r['w'][0]))])
            highest_aw.append(self.aw[int(round(trp_r['w'][-1]))])
        wm_lowest = weighted_median(np.array(lowest), np.array(lowest_aw))
        wm_highest = weighted_median(
            np.array(highest), np.array(highest_aw))
        triple_members = flatten(triples, unique=False)
        return r[id], wm_lowest, wm_highest, self.aw[id] * len(triples), self.aw[triple_members].sum()

    def __get_median_from_screens(self, screens):
        """Collapse the screening statistics into one smoothed rate."""
        # isolated unit: __get_screens returned the bare rate
        if isinstance(screens, float):
            return screens
        elif len(screens) == 3:
            # unweighted: median of (rate, low, high)
            return np.median(np.array(screens))
        elif len(screens) == 5:
            # weighted: keep the rate inside [low, high]; only move it to a
            # bound when the triples' combined weight dominates the unit's.
            rk, wm_lowest, wm_highest, w1, w2 = screens
            if rk >= wm_lowest and rk <= wm_highest:
                return rk
            elif rk < wm_lowest and w1 < w2:
                return wm_lowest
            elif rk > wm_highest and w1 < w2:
                return wm_highest
            else:
                return rk

    def __search_headbanging_median(self):
        """One smoothing pass: replace each rate by its screened median."""
        r, tr = self.r, self.tr
        new_r = []
        for k in list(tr.keys()):
            screens = self.__get_screens(
                k, tr[k], weighted=(self.aw is not None))
            new_r.append(self.__get_median_from_screens(screens))
        self.r = np.array(new_r)

    @_requires('pandas')
    @classmethod
    def by_col(cls, df, e, b, t=None, geom_col='geometry', inplace=False, **kwargs):
        """
        Compute smoothing by columns in a dataframe. The bounding box and point
        information is computed from the geometry column.

        Parameters
        -----------
        df : pandas.DataFrame
            a dataframe containing the data to be smoothed
        e : string or list of strings
            the name or names of columns containing event variables to be
            smoothed
        b : string or list of strings
            the name or names of columns containing the population
            variables to be smoothed
        t : Headbanging_Triples instance or list of Headbanging_Triples
            list of headbanging triples instances. If not provided, this
            is computed from the geometry column of the dataframe.
        geom_col: string
            the name of the column in the dataframe containing the
            geometry information.
        inplace : bool
            a flag denoting whether to output a copy of `df` with the
            relevant smoothed columns appended, or to append the columns
            directly to `df` itself.
        **kwargs: optional keyword arguments
            optional keyword options that are passed directly to the
            smoother.

        Returns
        ---------
        a new dataframe containing the smoothed Headbanging Median Rates for the
        event/population pairs. If done inplace, there is no return value and
        `df` is modified in place.
        """
        if not inplace:
            new = df.copy()
            cls.by_col(new, e, b, t=t, geom_col=geom_col, inplace=True, **kwargs)
            return new
        # prep for application over multiple event/population pairs
        if isinstance(e, str):
            e = [e]
        if isinstance(b, str):
            b = [b]
        if len(e) > len(b):
            b = b * len(e)
        data = get_points_array(df[geom_col])
        # Headbanging_Triples doesn't take **kwargs, so filter its arguments
        # (self, data, w, k=5, t=3, angle=135.0, edgecor=False):
        w = kwargs.pop('w', None)
        if w is None:
            found = False
            for k in df._metadata:
                # BUG FIX: look up the metadata key ``k``; the original
                # passed ``w`` itself as the key, so an attached weights
                # object could never be found.
                w = df.__dict__.get(k, None)
                if isinstance(w, W):
                    found = True
                    break
            if not found:
                raise Exception('Weights not provided and no weights attached to frame!'
                                ' Please provide a weight or attach a weight to the'
                                ' dataframe')
        # NOTE(review): the ``t`` parameter is shadowed here and never used;
        # triples are always recomputed from the geometry -- confirm intent.
        k = kwargs.pop('k', 5)
        t = kwargs.pop('t', 3)
        angle = kwargs.pop('angle', 135.0)
        edgecor = kwargs.pop('edgecor', False)
        hbt = Headbanging_Triples(data, w, k=k, t=t, angle=angle,
                                  edgecor=edgecor)
        for ename, bname in zip(e, b):
            r = cls(df[ename], df[bname], hbt, **kwargs).r
            name = '_'.join(('-'.join((ename, bname)), cls.__name__.lower()))
            df[name] = r
|
def indirect_age_standardization(e, b, s_e, s_b, n, alpha=0.05):
    """A utility function to compute rate through indirect age standardization

    Parameters
    ----------
    e : array
        (n*h, 1), event variable measured for each age group across n spatial units
    b : array
        (n*h, 1), population at risk variable measured for each age group across n spatial units
    s_e : array
        (n*h, 1), event variable measured for each age group across n spatial units in a standard population
    s_b : array
        (n*h, 1), population variable measured for each age group across n spatial units in a standard population
    n : integer
        the number of spatial units
    alpha : float
        significance level for confidence interval

    Notes
    -----
    e, b, s_e, and s_b are arranged in the same order

    Returns
    -------
    list
        a list of n tuples; a tuple has a rate and its lower and upper limits
        age standardized rate

    Examples
    --------
    Creating an array of an event variable (e.g., the number of cancer patients)
    for 2 regions in each of which 4 age groups are available.
    The first 4 values are event values for 4 age groups in the region 1,
    and the next 4 values are for 4 age groups in the region 2.

    >>> e = np.array([30, 25, 25, 15, 33, 21, 30, 20])

    Creating another array of a population-at-risk variable (e.g., total
    population) for the same two regions; the ordering matches ``e``.

    >>> b = np.array([100, 100, 110, 90, 100, 90, 110, 90])

    For indirect age standardization, we also need data for a standard
    population and its events, used as a benchmark for comparing age
    distributions across regions.

    >>> s_e = np.array([100, 45, 120, 100, 50, 30, 200, 80])
    >>> s_b = np.array([1000, 900, 1000, 900, 1000, 900, 1000, 900])
    >>> n = 2
    >>> [i[0] for i in indirect_age_standardization(e, b, s_e, s_b, n)]
    [0.23723821989528798, 0.2610803324099723]
    """
    smr = standardized_mortality_ratio(e, b, s_e, s_b, n)
    # overall crude rate of the standard population
    s_r_all = sum(s_e * 1.0) / sum(s_b * 1.0)
    adjusted_r = s_r_all * smr
    # confidence interval built on the log-SMR scale
    e_by_n = sum_by_n(e, 1.0, n)
    log_smr = np.log(smr)
    log_smr_sd = 1.0 / np.sqrt(e_by_n)
    norm_thres = norm.ppf(1 - 0.5 * alpha)
    log_smr_lower = log_smr - norm_thres * log_smr_sd
    log_smr_upper = log_smr + norm_thres * log_smr_sd
    smr_lower = np.exp(log_smr_lower) * s_r_all
    smr_upper = np.exp(log_smr_upper) * s_r_all
    res = list(zip(adjusted_r, smr_lower, smr_upper))
    return res
"def sum_by_n(d, w, n):\n \"\"\"A utility function to summarize a data array into n values\n after weighting the array with another weight array w\n\n Parameters\n ----------\n d : array\n (t, 1), numerical values\n w : array\n (t, 1), numerical... | from __future__ import division
"""
Apply smoothing to rate computation
[Longer Description]
Author(s):
Myunghwa Hwang mhwang4@gmail.com
David Folch dfolch@asu.edu
Luc Anselin luc.anselin@asu.edu
Serge Rey srey@asu.edu
"""
__author__ = "Myunghwa Hwang <mhwang4@gmail.com>, David Folch <dfolch@asu.edu>, Luc Anselin <luc.anselin@asu.edu>, Serge Rey <srey@asu.edu"
from libpysal.weights.weights import W
from libpysal.weights.distance import Kernel
from libpysal.weights.util import get_points_array, comb
from libpysal.cg import Point, Ray, LineSegment
from libpysal.cg import get_angle_between, get_points_dist, get_segment_point_dist,\
get_point_at_angle_and_dist, convex_hull, get_bounding_box
from libpysal.common import np, KDTree, requires as _requires
from libpysal.weights.spatial_lag import lag_spatial as slag
from scipy.stats import gamma, norm, chi2, poisson
from functools import reduce
import doctest
__all__ = ['Excess_Risk', 'Empirical_Bayes', 'Spatial_Empirical_Bayes', 'Spatial_Rate', 'Kernel_Smoother', 'Age_Adjusted_Smoother', 'Disk_Smoother', 'Spatial_Median_Rate', 'Spatial_Filtering', 'Headbanging_Triples', 'Headbanging_Median_Rate', 'flatten', 'weighted_median', 'sum_by_n', 'crude_age_standardization', 'direct_age_standardization', 'indirect_age_standardization', 'standardized_mortality_ratio', 'choynowski', 'assuncao_rate']
def flatten(l, unique=True):
    """flatten a list of lists

    Parameters
    ----------
    l : list
        of lists
    unique : boolean
        whether or not only unique items are wanted (default=True)

    Returns
    -------
    list
        of single items

    Examples
    --------
    Creating a sample list whose elements are lists of integers

    >>> l = [[1, 2], [3, 4, ], [5, 6]]

    Applying flatten function

    >>> flatten(l)
    [1, 2, 3, 4, 5, 6]
    """
    # BUG FIX: seed reduce with an empty list so an empty input returns []
    # instead of raising TypeError.
    l = reduce(lambda x, y: x + y, l, [])
    if not unique:
        return list(l)
    # NOTE: de-duplication via set() does not preserve element order.
    return list(set(l))
def weighted_median(d, w):
    """Return the median of ``d`` weighted by ``w``.

    Parameters
    ----------
    d : array
        (n, 1), variable for which median will be found
    w : array
        (n, 1), variable on which d's median will be decided

    Notes
    -----
    d and w are arranged in the same order

    Returns
    -------
    float
        median of d

    Examples
    --------
    >>> d = np.array([5,4,3,1,2])
    >>> w = np.array([10, 22, 9, 2, 5])
    >>> weighted_median(d, w)
    4
    """
    # sort the (weight, value) pairs by value
    pairs = np.array(list(zip(w, d)),
                     dtype=[('w', '%s' % w.dtype), ('v', '%s' % d.dtype)])
    pairs.sort(order='v')
    cum_w = pairs['w'].cumsum()
    half = cum_w[-1] * 1.0 / 2
    # first position where the cumulative weight reaches half the total
    idx = int(np.searchsorted(cum_w, half, side='left'))
    sorted_d = np.sort(d)
    # exact tie at the halfway point (and not the last element):
    # average the two straddling values
    if cum_w[idx] == half and idx < len(d) - 1:
        return sorted_d[idx:idx + 2].mean()
    return sorted_d[idx]
def sum_by_n(d, w, n):
    """Summarize a data array into n values after weighting with w.

    Parameters
    ----------
    d : array
        (t, 1), numerical values
    w : array
        (t, 1), numerical values for weighting
    n : integer
        the number of groups; t = c*n (c is a constant)

    Returns
    -------
    array
        (n, 1), an array with summarized values

    Examples
    --------
    >>> d = np.array([10, 9, 20, 30])
    >>> w = np.array([0.5, 0.1, 0.3, 0.8])
    >>> n = 2
    >>> sum_by_n(d, w, n)
    array([ 5.9, 30. ])
    """
    weighted = d * w
    size = len(weighted)
    step = size // n  # group length; floor division is intentional
    return np.array([sum(weighted[start:start + step])
                     for start in range(0, size, step)])
def crude_age_standardization(e, b, n):
    """Compute rates through crude age standardization.

    Parameters
    ----------
    e : array
        (n*h, 1), event variable measured for each age group across n spatial units
    b : array
        (n*h, 1), population at risk variable measured for each age group across n spatial units
    n : integer
        the number of spatial units

    Notes
    -----
    e and b are arranged in the same order

    Returns
    -------
    array
        (n, 1), age standardized rate

    Examples
    --------
    >>> e = np.array([30, 25, 25, 15, 33, 21, 30, 20])
    >>> b = np.array([100, 100, 110, 90, 100, 90, 110, 90])
    >>> n = 2
    >>> crude_age_standardization(e, b, n)
    array([0.2375    , 0.26666667])
    """
    age_group_rates = e * 1.0 / b
    regional_pop = sum_by_n(b, 1.0, n)
    # each age group's share of its region's total population
    age_weight = b * 1.0 / regional_pop.repeat(len(e) // n)
    return sum_by_n(age_group_rates, age_weight, n)
def direct_age_standardization(e, b, s, n, alpha=0.05):
    """A utility function to compute rate through direct age standardization

    Parameters
    ----------
    e : array
        (n*h, 1), event variable measured for each age group across n spatial units
    b : array
        (n*h, 1), population at risk variable measured for each age group across n spatial units
    s : array
        (n*h, 1), standard population for each age group across n spatial units
    n : integer
        the number of spatial units
    alpha : float
        significance level for confidence interval

    Notes
    -----
    e, b, and s are arranged in the same order

    Returns
    -------
    list
        a list of n tuples; a tuple has a rate and its lower and upper limits
        age standardized rates and confidence intervals

    Examples
    --------
    >>> e = np.array([30, 25, 25, 15, 33, 21, 30, 20])
    >>> b = np.array([1000, 1000, 1100, 900, 1000, 900, 1100, 900])
    >>> s = np.array([1000, 900, 1000, 900, 1000, 900, 1000, 900])
    >>> n = 2
    >>> a, b = [i[0] for i in direct_age_standardization(e, b, s, n)]
    >>> round(a, 4)
    0.0237
    >>> round(b, 4)
    0.0267
    """
    # age-specific weights: standard-population share divided by the local
    # population at risk
    age_weight = (1.0 / b) * (s * 1.0 / sum_by_n(s, 1.0, n).repeat(len(s) // n))
    adjusted_r = sum_by_n(e, age_weight, n)
    var_estimate = sum_by_n(e, np.square(age_weight), n)
    # gamma-approximation parameters for the confidence interval
    g_a = np.square(adjusted_r) / var_estimate
    g_b = var_estimate / adjusted_r
    # per-region maximum age weight, used to widen the upper bound
    k = [age_weight[i:i + len(b) // n].max() for i in range(0, len(b),
                                                            len(b) // n)]
    g_a_k = np.square(adjusted_r + k) / (var_estimate + np.square(k))
    g_b_k = (var_estimate + np.square(k)) / (adjusted_r + k)
    res = []
    for i in range(len(adjusted_r)):
        if adjusted_r[i] == 0:
            # With zero events the gamma parameters are undefined; fall back
            # to the exact Poisson zero-count upper bound.
            # BUG FIX: ``chi2.ppf`` requires the degrees-of-freedom argument
            # (df=2 for a zero count); the original call omitted it and
            # raised a TypeError at runtime.
            upper = 0.5 * chi2.ppf(1 - 0.5 * alpha, 2)
            lower = 0.0
        else:
            lower = gamma.ppf(0.5 * alpha, g_a[i], scale=g_b[i])
            upper = gamma.ppf(1 - 0.5 * alpha, g_a_k[i], scale=g_b_k[i])
        res.append((adjusted_r[i], lower, upper))
    return res
def standardized_mortality_ratio(e, b, s_e, s_b, n):
    """Compute the standardized mortality ratio (SMR).

    Parameters
    ----------
    e : array
        (n*h, 1), event variable measured for each age group across n spatial units
    b : array
        (n*h, 1), population at risk variable measured for each age group across n spatial units
    s_e : array
        (n*h, 1), event variable measured for each age group across n spatial units in a standard population
    s_b : array
        (n*h, 1), population variable measured for each age group across n spatial units in a standard population
    n : integer
        the number of spatial units

    Notes
    -----
    e, b, s_e, and s_b are arranged in the same order

    Returns
    -------
    array
        (nx1)

    Examples
    --------
    >>> e = np.array([30, 25, 25, 15, 33, 21, 30, 20])
    >>> b = np.array([100, 100, 110, 90, 100, 90, 110, 90])
    >>> s_e = np.array([100, 45, 120, 100, 50, 30, 200, 80])
    >>> s_b = np.array([1000, 900, 1000, 900, 1000, 900, 1000, 900])
    >>> n = 2
    >>> a, b = standardized_mortality_ratio(e, b, s_e, s_b, n)
    >>> round(a, 4)
    2.4869
    >>> round(b, 4)
    2.7368
    """
    # age-specific rates of the standard population
    reference_rates = s_e * 1.0 / s_b
    observed = sum_by_n(e, 1.0, n)
    # expected counts under the reference age-specific rates
    expected = sum_by_n(b, reference_rates, n)
    return observed * 1.0 / expected
def choynowski(e, b, n, threshold=None):
    """Choynowski map probabilities [Choynowski1959]_ .

    Parameters
    ----------
    e : array(n*h, 1)
        event variable measured for each age group across n spatial units
    b : array(n*h, 1)
        population at risk variable measured for each age group across n spatial units
    n : integer
        the number of spatial units
    threshold : float
        Returns zero for any p-value greater than threshold

    Notes
    -----
    e and b are arranged in the same order

    Returns
    -------
    array
        (nx1)

    Examples
    --------
    >>> e = np.array([30, 25, 25, 15, 33, 21, 30, 20])
    >>> b = np.array([100, 100, 110, 90, 100, 90, 110, 90])
    >>> n = 2
    >>> a,b = choynowski(e, b, n)
    >>> round(a, 3)
    0.304
    >>> round(b, 3)
    0.294
    """
    observed = sum_by_n(e, 1.0, n)
    population = sum_by_n(b, 1.0, n)
    overall_rate = sum(observed) * 1.0 / sum(population)
    expected = overall_rate * population
    probs = []
    for obs, exp in zip(observed, expected):
        # two one-sided Poisson tail probabilities
        if obs <= exp:
            probs.append(poisson.cdf(obs, exp))
        else:
            probs.append(1 - poisson.cdf(obs - 1, exp))
    if threshold:
        probs = [p if p < threshold else 0.0 for p in probs]
    return np.array(probs)
def assuncao_rate(e, b):
    """Standardized rates for rate-corrected Moran's I.

    The mean and standard deviation used for the standardization are those
    of the Empirical Bayes rate estimates [Choynowski1959]_ .

    Parameters
    ----------
    e : array(n, 1)
        event variable measured at n spatial units
    b : array(n, 1)
        population at risk variable measured at n spatial units

    Notes
    -----
    e and b are arranged in the same order

    Returns
    -------
    array
        (nx1)

    Examples
    --------
    >>> e = np.array([30, 25, 25, 15, 33, 21, 30, 20])
    >>> b = np.array([100, 100, 110, 90, 100, 90, 110, 90])
    >>> assuncao_rate(e, b)[:4]
    array([ 1.03843594, -0.04099089, -0.56250375, -1.73061861])
    """
    rates = e * 1.0 / b
    e_total, b_total = sum(e), sum(b)
    # Empirical Bayes prior mean (overall crude rate)
    prior_mean = e_total * 1.0 / b_total
    # population-weighted variance of the raw rates around the prior mean
    prior_var = sum(b * ((rates - prior_mean) ** 2)) / b_total
    alpha_hat = prior_var - prior_mean / (float(b_total) / len(e))
    posterior_var = alpha_hat + prior_mean / b
    return (rates - prior_mean) / np.sqrt(posterior_var)
class _Smoother(object):
    """
    This is a helper class that implements things that all smoothers should do.
    Right now, the only thing that we need to propagate is the by_col function.

    TBQH, most of these smoothers should be functions, not classes (aside from
    maybe headbanging triples), since they're literally only inits + one
    attribute.
    """
    def __init__(self):
        pass

    @classmethod
    def by_col(cls, df, e, b, inplace=False, **kwargs):
        """
        Compute smoothing by columns in a dataframe.

        Parameters
        -----------
        df : pandas.DataFrame
            a dataframe containing the data to be smoothed
        e : string or list of strings
            the name or names of columns containing event variables to be
            smoothed
        b : string or list of strings
            the name or names of columns containing the population
            variables to be smoothed
        inplace : bool
            a flag denoting whether to output a copy of `df` with the
            relevant smoothed columns appended, or to append the columns
            directly to `df` itself.
        **kwargs: optional keyword arguments
            optional keyword options that are passed directly to the
            smoother.

        Returns
        ---------
        a copy of `df` containing the columns. Or, if `inplace`, this returns
        None, but implicitly adds columns to `df`.
        """
        if not inplace:
            new = df.copy()
            cls.by_col(new, e, b, inplace=True, **kwargs)
            return new
        if isinstance(e, str):
            e = [e]
        if isinstance(b, str):
            b = [b]
        # a single population column may serve several event columns
        if len(b) == 1 and len(e) > 1:
            b = b * len(e)
        # BUG FIX: validate explicitly rather than with ``assert``, which is
        # stripped under ``python -O``. Callers still see a ValueError.
        if len(e) != len(b):
            raise ValueError('There is no one-to-one mapping between event'
                             ' variable and population at risk variable!')
        for ename, bname in zip(e, b):
            outcol = '_'.join(('-'.join((ename, bname)), cls.__name__.lower()))
            df[outcol] = cls(df[ename], df[bname], **kwargs).r
class Excess_Risk(_Smoother):
    """Excess Risk

    Parameters
    ----------
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units

    Attributes
    ----------
    r : array (n, 1)
        execess risk values

    Examples
    --------
    Reading data in stl_hom.csv into stl to extract values
    for event and population-at-risk variables

    >>> import libpysal
    >>> stl = libpysal.io.open(libpysal.examples.get_path('stl_hom.csv'), 'r')
    >>> stl_e, stl_b = np.array(stl[:,10]), np.array(stl[:,13])
    >>> er = Excess_Risk(stl_e, stl_b)
    >>> er.r[:10]
    array([[0.20665681],
           [0.43613787],
           [0.42078261],
           [0.22066928],
           [0.57981596],
           [0.35301709],
           [0.56407549],
           [0.17020994],
           [0.3052372 ],
           [0.25821905]])
    """
    def __init__(self, e, b):
        # coerce inputs to column vectors
        events = np.asarray(e).reshape(-1, 1)
        population = np.asarray(b).reshape(-1, 1)
        # overall crude rate across all units
        overall_rate = events.sum() * 1.0 / population.sum()
        # ratio of the observed rate to the expected (overall) rate
        self.r = events * 1.0 / (population * overall_rate)
class Empirical_Bayes(_Smoother):
    """Aspatial Empirical Bayes Smoothing

    Parameters
    ----------
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units

    Attributes
    ----------
    r : array (n, 1)
        rate values from Empirical Bayes Smoothing

    Examples
    --------
    Reading data in stl_hom.csv into stl to extract values
    for event and population-at-risk variables

    >>> import libpysal
    >>> stl = libpysal.io.open(libpysal.examples.get_path('stl_hom.csv'), 'r')
    >>> stl_e, stl_b = np.array(stl[:,10]), np.array(stl[:,13])
    >>> eb = Empirical_Bayes(stl_e, stl_b)
    >>> eb.r[:10]
    array([[2.36718950e-05],
           [4.54539167e-05],
           [4.78114019e-05],
           [2.76907146e-05],
           [6.58989323e-05],
           [3.66494122e-05],
           [5.79952721e-05],
           [2.03064590e-05],
           [3.31152999e-05],
           [3.02748380e-05]])
    """
    def __init__(self, e, b):
        # coerce inputs to column vectors
        events = np.asarray(e).reshape(-1, 1)
        population = np.asarray(b).reshape(-1, 1)
        e_total = events.sum() * 1.0
        b_total = population.sum() * 1.0
        # prior mean: overall crude rate
        prior_mean = e_total / b_total
        raw_rate = events * 1.0 / population
        deviation = raw_rate - prior_mean
        # method-of-moments prior variance estimate
        variance = ((population * deviation * deviation).sum() * 1.0 / b_total
                    - prior_mean * 1.0 / population.mean())
        # shrink each raw rate toward the prior mean
        shrinkage = variance / (variance + prior_mean / population)
        self.r = shrinkage * raw_rate + (1.0 - shrinkage) * prior_mean
class _Spatial_Smoother(_Smoother):
    """
    This is a helper class that implements things that all
    spatial smoothers should do.
    Right now, the only thing that we need to propagate is the by_col function.

    TBQH, most of these smoothers should be functions, not classes (aside from
    maybe headbanging triples), since they're literally only inits + one
    attribute.
    """
    def __init__(self):
        pass

    @classmethod
    def by_col(cls, df, e, b, w=None, inplace=False, **kwargs):
        """
        Compute smoothing by columns in a dataframe.

        Parameters
        -----------
        df : pandas.DataFrame
            a dataframe containing the data to be smoothed
        e : string or list of strings
            the name or names of columns containing event variables to be
            smoothed
        b : string or list of strings
            the name or names of columns containing the population
            variables to be smoothed
        w : pysal.weights.W or list of pysal.weights.W
            the spatial weights object or objects to use with the
            event-population pairs. If not provided and a weights object
            is in the dataframe's metadata, that weights object will be
            used.
        inplace : bool
            a flag denoting whether to output a copy of `df` with the
            relevant smoothed columns appended, or to append the columns
            directly to `df` itself.
        **kwargs: optional keyword arguments
            optional keyword options that are passed directly to the
            smoother.

        Returns
        ---------
        a copy of `df` containing the columns. Or, if `inplace`, this returns
        None, but implicitly adds columns to `df`.
        """
        if not inplace:
            new = df.copy()
            cls.by_col(new, e, b, w=w, inplace=True, **kwargs)
            return new
        if isinstance(e, str):
            e = [e]
        if isinstance(b, str):
            b = [b]
        if w is None:
            found = False
            for k in df._metadata:
                # BUG FIX: look up the metadata key ``k``; the original
                # passed ``w`` (None) as the key, so an attached weights
                # object could never be found.
                w = df.__dict__.get(k, None)
                if isinstance(w, W):
                    found = True
                    break
            if not found:
                raise Exception('Weights not provided and no weights attached to frame!'
                                ' Please provide a weight or attach a weight to the'
                                ' dataframe')
        # a single weights object may serve all event/population pairs
        if isinstance(w, W):
            w = [w] * len(e)
        if len(b) == 1 and len(e) > 1:
            b = b * len(e)
        # BUG FIX: validate explicitly rather than with ``assert``, which is
        # stripped under ``python -O``. Callers still see a ValueError.
        if len(e) != len(b):
            raise ValueError('There is no one-to-one mapping between event'
                             ' variable and population at risk variable!')
        for ename, bname, wi in zip(e, b, w):
            outcol = '_'.join(('-'.join((ename, bname)), cls.__name__.lower()))
            df[outcol] = cls(df[ename], df[bname], w=wi, **kwargs).r
class Spatial_Empirical_Bayes(_Spatial_Smoother):
    """Spatial Empirical Bayes Smoothing

    Parameters
    ----------
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units
    w : spatial weights instance

    Attributes
    ----------
    r : array (n, 1)
        rate values from Empirical Bayes Smoothing

    Examples
    --------
    Reading data in stl_hom.csv into stl to extract values
    for event and population-at-risk variables

    >>> import libpysal
    >>> stl = libpysal.io.open(libpysal.examples.get_path('stl_hom.csv'), 'r')
    >>> stl_e, stl_b = np.array(stl[:,10]), np.array(stl[:,13])

    Creating a spatial weights instance by reading in stl.gal file.

    >>> stl_w = libpysal.io.open(libpysal.examples.get_path('stl.gal'), 'r').read()
    >>> if not stl_w.id_order_set: stl_w.id_order = range(1,len(stl) + 1)
    >>> from esda.smoothing import Spatial_Empirical_Bayes
    >>> s_eb = Spatial_Empirical_Bayes(stl_e, stl_b, stl_w)
    >>> s_eb.r[:10]
    array([[4.01485749e-05],
           [3.62437513e-05],
           [4.93034844e-05],
           [5.09387329e-05],
           [3.72735210e-05],
           [3.69333797e-05],
           [5.40245456e-05],
           [2.99806055e-05],
           [3.73034109e-05],
           [3.47270722e-05]])
    """
    def __init__(self, e, b, w):
        # The weights' id_order must match the row order of e and b.
        if not w.id_order_set:
            raise ValueError("w id_order must be set to align with the order of e an b")
        # coerce inputs to column vectors
        e = np.asarray(e).reshape(-1,1)
        b = np.asarray(b).reshape(-1,1)
        # local prior mean: the spatially smoothed (neighborhood) rate
        r_mean = Spatial_Rate(e, b, w).r
        rate = e * 1.0 / b
        r_var_left = np.ones_like(e) * 1.
        ngh_num = np.ones_like(e)
        # bi: population of each unit plus its neighbors
        bi = slag(w, b) + b
        for i, idv in enumerate(w.id_order):
            # neighborhood includes the unit itself
            ngh = list(w[idv].keys()) + [idv]
            nghi = [w.id2i[k] for k in ngh]
            ngh_num[i] = len(nghi)
            # population-weighted squared deviation from the local prior mean
            v = sum(np.square(rate[nghi] - r_mean[i]) * b[nghi])
            r_var_left[i] = v
        r_var_left = r_var_left / bi
        r_var_right = r_mean / (bi / ngh_num)
        # prior variance estimate; negative estimates are truncated at zero
        r_var = r_var_left - r_var_right
        r_var[r_var < 0] = 0.0
        # shrink each raw rate toward its local prior mean
        self.r = r_mean + (rate - r_mean) * (r_var / (r_var + (r_mean / b)))
class Spatial_Rate(_Spatial_Smoother):
    """Spatial Rate Smoothing.

    The smoothed rate for a unit pools the events and populations of the
    unit together with its neighbors (under binary weights):
    ``r_i = (e_i + sum_j e_j) / (b_i + sum_j b_j)`` for ``j`` in ``N(i)``.

    Parameters
    ----------
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units
    w : spatial weights instance

    Attributes
    ----------
    r : array (n, 1)
        rate values from spatial rate smoothing

    Examples
    --------
    Reading data in stl_hom.csv into stl to extract values
    for event and population-at-risk variables

    >>> import libpysal
    >>> stl = libpysal.io.open(libpysal.examples.get_path('stl_hom.csv'), 'r')

    The 11th and 14th columns in stl_hom.csv includes the number of homocides and population.
    Creating two arrays from these columns.

    >>> stl_e, stl_b = np.array(stl[:,10]), np.array(stl[:,13])

    Creating a spatial weights instance by reading in stl.gal file.

    >>> stl_w = libpysal.io.open(libpysal.examples.get_path('stl.gal'), 'r').read()

    Ensuring that the elements in the spatial weights instance are ordered
    by the given sequential numbers from 1 to the number of observations in stl_hom.csv

    >>> if not stl_w.id_order_set: stl_w.id_order = range(1,len(stl) + 1)

    Creating an instance of Spatial_Rate class using stl_e, stl_b, and stl_w

    >>> from esda.smoothing import Spatial_Rate
    >>> sr = Spatial_Rate(stl_e,stl_b,stl_w)

    Extracting the risk values through the property r of sr

    >>> sr.r[:10]
    array([[4.59326407e-05],
           [3.62437513e-05],
           [4.98677081e-05],
           [5.09387329e-05],
           [3.72735210e-05],
           [4.01073093e-05],
           [3.79372794e-05],
           [3.27019246e-05],
           [4.26204928e-05],
           [3.47270722e-05]])
    """

    def __init__(self, e, b, w):
        # Weights must be aligned with the order of the data vectors.
        if not w.id_order_set:
            raise ValueError("w id_order must be set to align with the order of e and b")
        events = np.asarray(e).reshape(-1, 1)
        pops = np.asarray(b).reshape(-1, 1)
        # Switch to binary weights so the spatial lag is a plain sum over
        # neighbors; set the transform back to 'o' when done.
        w.transform = 'b'
        lag_events = slag(w, events)
        lag_pops = slag(w, pops)
        self.r = (events + lag_events) / (pops + lag_pops)
        w.transform = 'o'
class Kernel_Smoother(_Spatial_Smoother):
    """Kernel smoothing.

    Smooths rates as the ratio of kernel-weighted event sums to
    kernel-weighted population sums.

    Parameters
    ----------
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units
    w : Kernel weights instance

    Attributes
    ----------
    r : array (n, 1)
        rate values from spatial rate smoothing

    Examples
    --------
    Creating an array including event values for 6 regions

    >>> e = np.array([10, 1, 3, 4, 2, 5])

    Creating another array including population-at-risk values for the 6 regions

    >>> b = np.array([100, 15, 20, 20, 80, 90])

    Creating a list containing geographic coordinates of the 6 regions' centroids

    >>> points=[(10, 10), (20, 10), (40, 10), (15, 20), (30, 20), (30, 30)]

    Creating a kernel-based spatial weights instance by using the above points

    >>> kw=Kernel(points)

    Ensuring that the elements in the kernel-based weights are ordered
    by the given sequential numbers from 0 to 5

    >>> if not kw.id_order_set: kw.id_order = range(0,len(points))

    Applying kernel smoothing to e and b

    >>> kr = Kernel_Smoother(e, b, kw)

    Extracting the smoothed rates through the property r of the Kernel_Smoother instance

    >>> kr.r
    array([[0.10543301],
           [0.0858573 ],
           [0.08256196],
           [0.09884584],
           [0.04756872],
           [0.04845298]])
    """
    def __init__(self, e, b, w):
        # isinstance (rather than an exact-type comparison) also accepts
        # Kernel subclasses, which carry the same weight interface.
        if not isinstance(w, Kernel):
            raise Error('w must be an instance of Kernel weights')
        if not w.id_order_set:
            raise ValueError("w id_order must be set to align with the order of e and b")
        e = np.asarray(e).reshape(-1, 1)
        b = np.asarray(b).reshape(-1, 1)
        # Kernel-weighted (lagged) sums of events and populations.
        w_e, w_b = slag(w, e), slag(w, b)
        self.r = w_e / w_b
class Age_Adjusted_Smoother(_Spatial_Smoother):
    """Age-adjusted rate smoothing.

    Parameters
    ----------
    e : array (n*h, 1)
        event variable measured for each age group across n spatial units
    b : array (n*h, 1)
        population at risk variable measured for each age group across n spatial units
    w : spatial weights instance
    s : array (n*h, 1)
        standard population for each age group across n spatial units

    Attributes
    ----------
    r : array (n, 1)
        rate values from spatial rate smoothing

    Notes
    -----
    Weights used to smooth age-specific events and populations are simple binary weights

    Examples
    --------
    Creating an array including 12 values for the 6 regions with 2 age groups

    >>> e = np.array([10, 8, 1, 4, 3, 5, 4, 3, 2, 1, 5, 3])

    Creating another array including 12 population-at-risk values for the 6 regions

    >>> b = np.array([100, 90, 15, 30, 25, 20, 30, 20, 80, 80, 90, 60])

    For age adjustment, we need another array of values containing standard population
    s includes standard population data for the 6 regions

    >>> s = np.array([98, 88, 15, 29, 20, 23, 33, 25, 76, 80, 89, 66])

    Creating a list containing geographic coordinates of the 6 regions' centroids

    >>> points=[(10, 10), (20, 10), (40, 10), (15, 20), (30, 20), (30, 30)]

    Creating a kernel-based spatial weights instance by using the above points

    >>> kw=Kernel(points)

    Ensuring that the elements in the kernel-based weights are ordered
    by the given sequential numbers from 0 to 5

    >>> if not kw.id_order_set: kw.id_order = range(0,len(points))

    Applying age-adjusted smoothing to e and b

    >>> ar = Age_Adjusted_Smoother(e, b, kw, s)

    Extracting the smoothed rates through the property r of the Age_Adjusted_Smoother instance

    >>> ar.r
    array([0.10519625, 0.08494318, 0.06440072, 0.06898604, 0.06952076,
           0.05020968])
    """
    def __init__(self, e, b, w, s, alpha=0.05):
        e = np.asarray(e).reshape(-1, 1)
        b = np.asarray(b).reshape(-1, 1)
        s = np.asarray(s).flatten()
        t = len(e)
        # h: number of age groups per spatial unit (data are interleaved).
        h = t // w.n
        w.transform = 'b'
        e_n, b_n = [], []
        # Spatially lag each age group separately, then re-interleave.
        for i in range(h):
            e_n.append(slag(w, e[i::h]).tolist())
            b_n.append(slag(w, b[i::h]).tolist())
        e_n = np.array(e_n).reshape((1, t), order='F')[0]
        b_n = np.array(b_n).reshape((1, t), order='F')[0]
        e_n = e_n.reshape(s.shape)
        b_n = b_n.reshape(s.shape)
        # Direct age standardization of the spatially pooled counts.
        r = direct_age_standardization(e_n, b_n, s, w.n, alpha=alpha)
        self.r = np.array([i[0] for i in r])
        w.transform = 'o'

    @_requires('pandas')
    @classmethod
    def by_col(cls, df, e, b, w=None, s=None, **kwargs):
        """
        Compute smoothing by columns in a dataframe.

        Parameters
        -----------
        df : pandas.DataFrame
            a dataframe containing the data to be smoothed
        e : string or list of strings
            the name or names of columns containing event variables to be
            smoothed
        b : string or list of strings
            the name or names of columns containing the population
            variables to be smoothed
        w : pysal.weights.W or list of pysal.weights.W
            the spatial weights object or objects to use with the
            event-population pairs. If not provided and a weights object
            is in the dataframe's metadata, that weights object will be
            used.
        s : string or list of strings
            the name or names of columns to use as a standard population
            variable for the events `e` and at-risk populations `b`.
        inplace : bool
            a flag denoting whether to output a copy of `df` with the
            relevant smoothed columns appended, or to append the columns
            directly to `df` itself.
        **kwargs: optional keyword arguments
            optional keyword options that are passed directly to the
            smoother.

        Returns
        ---------
        a copy of `df` containing the columns. Or, if `inplace`, this returns
        None, but implicitly adds columns to `df`.
        """
        if s is None:
            raise Exception('Standard population variable "s" must be supplied.')
        import pandas as pd
        if isinstance(e, str):
            e = [e]
        if isinstance(b, str):
            b = [b]
        if isinstance(s, str):
            s = [s]
        if w is None:
            found = False
            for k in df._metadata:
                # BUG FIX: previously looked up `w` (always None) instead of
                # the metadata attribute name `k`.
                w = df.__dict__.get(k, None)
                if isinstance(w, W):
                    found = True
                    break
            if not found:
                raise Exception('Weights not provided and no weights attached to frame!'
                                ' Please provide a weight or attach a weight to the'
                                ' dataframe.')
        if isinstance(w, W):
            w = [w] * len(e)
        if not all(isinstance(wi, W) for wi in w):
            raise Exception('Weights object must be an instance of '
                            ' libpysal.weights.W!')
        b = b * len(e) if len(b) == 1 and len(e) > 1 else b
        s = s * len(e) if len(s) == 1 and len(e) > 1 else s
        try:
            assert len(e) == len(b)
            assert len(e) == len(s)
            assert len(e) == len(w)
        except AssertionError:
            raise ValueError('There is no one-to-one mapping between event'
                             ' variable and population at risk variable, and '
                             ' standard population variable, and spatial '
                             ' weights!')
        rdf = []
        max_len = 0
        for ei, bi, wi, si in zip(e, b, w, s):
            outcol = '_'.join(('-'.join((ei, bi)), cls.__name__.lower()))
            this_r = cls(df[ei], df[bi], w=wi, s=df[si], **kwargs).r
            # BUG FIX: track the true maximum length (was reset to 0).
            max_len = max(max_len, len(this_r))
            rdf.append((outcol, this_r.tolist()))
        # Pad shorter result columns with None so all columns align.
        rdf = [(name, col + [None] * (max_len - len(col))) for name, col in rdf]
        # pd.DataFrame.from_items was removed in pandas 1.0; a dict of
        # columns preserves insertion order on Python 3.7+.
        return pd.DataFrame(dict(rdf))
class Disk_Smoother(_Spatial_Smoother):
    """Locally weighted averages or disk smoothing.

    Each unit's rate is the weighted average of the raw rates in its
    neighborhood, normalized by the total neighborhood weight.

    Parameters
    ----------
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units
    w : spatial weights matrix

    Attributes
    ----------
    r : array (n, 1)
        rate values from disk smoothing

    Examples
    --------
    Reading data in stl_hom.csv into stl to extract values
    for event and population-at-risk variables

    >>> import libpysal
    >>> stl = libpysal.io.open(libpysal.examples.get_path('stl_hom.csv'), 'r')

    The 11th and 14th columns in stl_hom.csv includes the number of homocides and population.
    Creating two arrays from these columns.

    >>> stl_e, stl_b = np.array(stl[:,10]), np.array(stl[:,13])

    Creating a spatial weights instance by reading in stl.gal file.

    >>> stl_w = libpysal.io.open(libpysal.examples.get_path('stl.gal'), 'r').read()

    Ensuring that the elements in the spatial weights instance are ordered
    by the given sequential numbers from 1 to the number of observations in stl_hom.csv

    >>> if not stl_w.id_order_set: stl_w.id_order = range(1,len(stl) + 1)

    Applying disk smoothing to stl_e and stl_b

    >>> sr = Disk_Smoother(stl_e,stl_b,stl_w)

    Extracting the risk values through the property r of s_eb

    >>> sr.r[:10]
    array([[4.56502262e-05],
           [3.44027685e-05],
           [3.38280487e-05],
           [4.78530468e-05],
           [3.12278573e-05],
           [2.22596997e-05],
           [2.67074856e-05],
           [2.36924573e-05],
           [3.48801587e-05],
           [3.09511832e-05]])
    """

    def __init__(self, e, b, w):
        # Weights must be aligned with the order of the data vectors.
        if not w.id_order_set:
            raise ValueError("w id_order must be set to align with the order of e and b")
        events = np.asarray(e).reshape(-1, 1)
        pops = np.asarray(b).reshape(-1, 1)
        raw_rates = events * 1.0 / pops
        # Per-unit total neighborhood weight used to normalize the lag.
        totals = np.array([sum(w.weights[idx]) for idx in w.id_order])
        self.r = slag(w, raw_rates) / totals.reshape(-1, 1)
class Spatial_Median_Rate(_Spatial_Smoother):
    """Spatial Median Rate Smoothing

    Replaces each unit's rate with the (optionally weighted) median of the
    rates in its neighborhood (neighbors + self), repeated ``iteration``
    times.

    Parameters
    ----------
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units
    w : spatial weights instance
    aw : array (n, 1)
        auxiliary weight variable measured across n spatial units
    iteration : integer
        the number of iterations

    Attributes
    ----------
    r : array (n, 1)
        rate values from spatial median rate smoothing
    w : spatial weights instance
    aw : array (n, 1)
        auxiliary weight variable measured across n spatial units

    Examples
    --------
    Reading data in stl_hom.csv into stl to extract values
    for event and population-at-risk variables

    >>> import libpysal
    >>> stl = libpysal.io.open(libpysal.examples.get_path('stl_hom.csv'), 'r')

    The 11th and 14th columns in stl_hom.csv includes the number of homocides and population.
    Creating two arrays from these columns.

    >>> stl_e, stl_b = np.array(stl[:,10]), np.array(stl[:,13])

    Creating a spatial weights instance by reading in stl.gal file.

    >>> stl_w = libpysal.io.open(libpysal.examples.get_path('stl.gal'), 'r').read()

    Ensuring that the elements in the spatial weights instance are ordered
    by the given sequential numbers from 1 to the number of observations in stl_hom.csv

    >>> if not stl_w.id_order_set: stl_w.id_order = range(1,len(stl) + 1)

    Computing spatial median rates without iteration

    >>> smr0 = Spatial_Median_Rate(stl_e,stl_b,stl_w)

    Extracting the computed rates through the property r of the Spatial_Median_Rate instance

    >>> smr0.r[:10]
    array([3.96047383e-05, 3.55386859e-05, 3.28308921e-05, 4.30731238e-05,
           3.12453969e-05, 1.97300409e-05, 3.10159267e-05, 2.19279204e-05,
           2.93763432e-05, 2.93763432e-05])

    Recomputing spatial median rates with 5 iterations

    >>> smr1 = Spatial_Median_Rate(stl_e,stl_b,stl_w,iteration=5)

    Extracting the computed rates through the property r of the Spatial_Median_Rate instance

    >>> smr1.r[:10]
    array([3.11293620e-05, 2.95956330e-05, 3.11293620e-05, 3.10159267e-05,
           2.98436066e-05, 2.76406686e-05, 3.10159267e-05, 2.94788171e-05,
           2.99460806e-05, 2.96981070e-05])

    Computing spatial median rates by using the base variable as auxilliary weights
    without iteration

    >>> smr2 = Spatial_Median_Rate(stl_e,stl_b,stl_w,aw=stl_b)

    Extracting the computed rates through the property r of the Spatial_Median_Rate instance

    >>> smr2.r[:10]
    array([5.77412020e-05, 4.46449551e-05, 5.77412020e-05, 5.77412020e-05,
           4.46449551e-05, 3.61363528e-05, 3.61363528e-05, 4.46449551e-05,
           5.77412020e-05, 4.03987355e-05])

    Recomputing spatial median rates by using the base variable as auxilliary weights
    with 5 iterations

    >>> smr3 = Spatial_Median_Rate(stl_e,stl_b,stl_w,aw=stl_b,iteration=5)

    Extracting the computed rates through the property r of the Spatial_Median_Rate instance

    >>> smr3.r[:10]
    array([3.61363528e-05, 4.46449551e-05, 3.61363528e-05, 3.61363528e-05,
           4.46449551e-05, 3.61363528e-05, 3.61363528e-05, 4.46449551e-05,
           3.61363528e-05, 4.46449551e-05])
    >>>
    """
    def __init__(self, e, b, w, aw=None, iteration=1):
        if not w.id_order_set:
            raise ValueError("w id_order must be set to align with the order of e and b")
        e = np.asarray(e).flatten()
        b = np.asarray(b).flatten()
        # Start from the raw rates and repeatedly replace each with its
        # neighborhood median.
        self.r = e * 1.0 / b
        self.aw, self.w = aw, w
        while iteration:
            self.__search_median()
            iteration -= 1

    def __search_median(self):
        # One smoothing pass: each unit's rate becomes the (weighted)
        # median of the rates in its disk (self + neighbors).
        r, aw, w = self.r, self.aw, self.w
        new_r = []
        if self.aw is None:
            # Unweighted: plain median over the disk.
            for i, id in enumerate(w.id_order):
                r_disk = np.append(r[i], r[w.neighbor_offsets[id]])
                new_r.append(np.median(r_disk))
        else:
            # Weighted: median of disk rates weighted by the auxiliary variable.
            for i, id in enumerate(w.id_order):
                id_d = [i] + list(w.neighbor_offsets[id])
                aw_d, r_d = aw[id_d], r[id_d]
                new_r.append(weighted_median(r_d, aw_d))
        self.r = np.asarray(new_r).reshape(r.shape)
class Spatial_Filtering(_Smoother):
    """Spatial Filtering.

    Parameters
    ----------
    bbox : a list of two lists where each list is a pair of coordinates
        a bounding box for the entire n spatial units
    data : array (n, 2)
        x, y coordinates
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units
    x_grid : integer
        the number of cells on x axis
    y_grid : integer
        the number of cells on y axis
    r : float
        fixed radius of a moving window
    pop : integer
        population threshold to create adaptive moving windows

    Attributes
    ----------
    grid : array (x_grid*y_grid, 2)
        x, y coordinates for grid points
    r : array (x_grid*y_grid, 1)
        rate values for grid points

    Notes
    -----
    No tool is provided to find an optimal value for r or pop.

    Examples
    --------
    Reading data in stl_hom.csv into stl to extract values
    for event and population-at-risk variables

    >>> import libpysal
    >>> stl = libpysal.io.open(libpysal.examples.get_path('stl_hom.csv'), 'r')

    Reading the stl data in the WKT format so that
    we can easily extract polygon centroids

    >>> from libpysal.io.util.wkt import WKTParser
    >>> fromWKT = WKTParser()
    >>> stl.cast('WKT',fromWKT)

    Extracting polygon centroids through iteration

    >>> d = np.array([i.centroid for i in stl[:,0]])

    Specifying the bounding box for the stl_hom data.
    The bbox should includes two points for the left-bottom and the right-top corners

    >>> bbox = [[-92.700676, 36.881809], [-87.916573, 40.3295669]]

    The 11th and 14th columns in stl_hom.csv includes the number of homocides and population.
    Creating two arrays from these columns.

    >>> stl_e, stl_b = np.array(stl[:,10]), np.array(stl[:,13])

    Applying spatial filtering by using a 10*10 mesh grid and a moving window
    with 2 radius

    >>> sf_0 = Spatial_Filtering(bbox,d,stl_e,stl_b,10,10,r=2)

    Extracting the resulting rates through the property r of the Spatial_Filtering instance

    >>> sf_0.r[:10]
    array([4.23561763e-05, 4.45290850e-05, 4.56456221e-05, 4.49133384e-05,
           4.39671835e-05, 4.44903042e-05, 4.19845497e-05, 4.11936548e-05,
           3.93463504e-05, 4.04376345e-05])

    Applying another spatial filtering by allowing the moving window to grow until
    600000 people are found in the window

    >>> sf = Spatial_Filtering(bbox,d,stl_e,stl_b,10,10,pop=600000)

    Checking the size of the reulting array including the rates

    >>> sf.r.shape
    (100,)

    Extracting the resulting rates through the property r of the Spatial_Filtering instance

    >>> sf.r[:10]
    array([3.73728738e-05, 4.04456300e-05, 4.04456300e-05, 3.81035327e-05,
           4.54831940e-05, 4.54831940e-05, 3.75658628e-05, 3.75658628e-05,
           3.75658628e-05, 3.75658628e-05])
    """
    def __init__(self, bbox, data, e, b, x_grid, y_grid, r=None, pop=None):
        e = np.asarray(e).reshape(-1, 1)
        b = np.asarray(b).reshape(-1, 1)
        data_tree = KDTree(data)
        x_range = bbox[1][0] - bbox[0][0]
        y_range = bbox[1][1] - bbox[0][1]
        # Regular mesh over the bounding box (mgrid excludes the upper bound).
        x, y = np.mgrid[bbox[0][0]:bbox[1][0]:float(x_range) / x_grid,
                        bbox[0][1]:bbox[1][1]:float(y_range) / y_grid]
        self.grid = list(zip(x.ravel(), y.ravel()))
        self.r = []
        if r is None and pop is None:
            raise ValueError("Either r or pop should not be None")
        if r is not None:
            # Fixed-radius window: pooled rate of all points within r of
            # each grid point.
            pnts_in_disk = data_tree.query_ball_point(self.grid, r=r)
            for idx in pnts_in_disk:
                # local renamed from `r` to avoid shadowing the radius parameter
                disk_rate = e[idx].sum() * 1.0 / b[idx].sum()
                self.r.append(disk_rate)
        if pop is not None:
            # Adaptive window: take nearest points (in distance order) while
            # the cumulative population stays within the threshold.
            nearest_pnts = data_tree.query(self.grid, k=len(e))[1]
            for idx in nearest_pnts:
                e_n, b_n = e[idx].cumsum(), b[idx].cumsum()
                b_n_filter = b_n <= pop
                e_n_f, b_n_f = e_n[b_n_filter], b_n[b_n_filter]
                if len(e_n_f) == 0:
                    # Nearest point alone exceeds the threshold; use it anyway.
                    e_n_f = e_n[[0]]
                    b_n_f = b_n[[0]]
                self.r.append(e_n_f[-1] * 1.0 / b_n_f[-1])
        self.r = np.array(self.r)

    @_requires('pandas')
    @classmethod
    def by_col(cls, df, e, b, x_grid, y_grid, geom_col='geometry', **kwargs):
        """
        Compute smoothing by columns in a dataframe. The bounding box and point
        information is computed from the geometry column.

        Parameters
        -----------
        df : pandas.DataFrame
            a dataframe containing the data to be smoothed
        e : string or list of strings
            the name or names of columns containing event variables to be
            smoothed
        b : string or list of strings
            the name or names of columns containing the population
            variables to be smoothed
        x_grid : integer
            number of grid cells to use along the x-axis
        y_grid : integer
            number of grid cells to use along the y-axis
        geom_col: string
            the name of the column in the dataframe containing the
            geometry information.
        **kwargs: optional keyword arguments
            optional keyword options that are passed directly to the
            smoother.

        Returns
        ---------
        a new dataframe of dimension (x_grid*y_grid, 3), containing the
        coordinates of the grid cells and the rates associated with those grid
        cells.
        """
        import pandas as pd
        # prep for application over multiple event/population pairs
        if isinstance(e, str):
            e = [e]
        if isinstance(b, str):
            b = [b]
        if len(e) > len(b):
            b = b * len(e)
        if isinstance(x_grid, (int, float)):
            x_grid = [x_grid] * len(e)
        if isinstance(y_grid, (int, float)):
            y_grid = [y_grid] * len(e)
        bbox = get_bounding_box(df[geom_col])
        bbox = [[bbox.left, bbox.lower], [bbox.right, bbox.upper]]
        data = get_points_array(df[geom_col])
        res = []
        for ename, bname, xgi, ygi in zip(e, b, x_grid, y_grid):
            r = cls(bbox, data, df[ename], df[bname], xgi, ygi, **kwargs)
            grid = np.asarray(r.grid).reshape(-1, 2)
            name = '_'.join(('-'.join((ename, bname)), cls.__name__.lower()))
            colnames = ('_'.join((name, suffix)) for suffix in ['X', 'Y', 'R'])
            items = [(colname, col) for colname, col in zip(colnames, [grid[:, 0],
                                                                       grid[:, 1],
                                                                       r.r])]
            # pd.DataFrame.from_items was removed in pandas 1.0; a dict of
            # columns preserves insertion order on Python 3.7+.
            res.append(pd.DataFrame(dict(items)))
        outdf = pd.concat(res)
        return outdf
class Headbanging_Triples(object):
    """Generate a pseudo spatial weights instance that contains headbanging triples

    .. deprecated::
        The constructor unconditionally raises ``DeprecationWarning``; the
        class is retained only for reference.

    Parameters
    ----------
    data : array (n, 2)
        numpy array of x, y coordinates
    w : spatial weights instance
    k : integer number of nearest neighbors
    t : integer
        the number of triples
    angle : integer between 0 and 180
        the angle criterium for a set of triples
    edgecorr : boolean
        whether or not correction for edge points is made

    Attributes
    ----------
    triples : dictionary
        key is observation record id, value is a list of lists of triple ids
    extra : dictionary
        key is observation record id, value is a list of the following:
        tuple of original triple observations
        distance between original triple observations
        distance between an original triple observation and its extrapolated point

    Examples
    --------
    importing k-nearest neighbor weights creator

    >>> import libpysal # doctest: +SKIP

    Reading data in stl_hom.csv into stl_db to extract values
    for event and population-at-risk variables

    >>> stl_db = libpysal.io.open(libpysal.examples.get_path('stl_hom.csv'),'r') # doctest: +SKIP

    Reading the stl data in the WKT format so that
    we can easily extract polygon centroids

    >>> from libpysal.io.util.wkt import WKTParser # doctest: +SKIP
    >>> fromWKT = WKTParser() # doctest: +SKIP
    >>> stl_db.cast('WKT',fromWKT) # doctest: +SKIP

    Extracting polygon centroids through iteration

    >>> d = np.array([i.centroid for i in stl_db[:,0]]) # doctest: +SKIP

    Using the centroids, we create a 5-nearst neighbor weights

    >>> w = libpysal.weights.KNN(d,k=5) # doctest: +SKIP

    Ensuring that the elements in the spatial weights instance are ordered
    by the order of stl_db's IDs

    >>> if not w.id_order_set: w.id_order = w.id_order # doctest: +SKIP

    Finding headbaning triples by using 5 nearest neighbors

    >>> ht = Headbanging_Triples(d,w,k=5) # doctest: +SKIP

    Checking the members of triples

    >>> for k, item in ht.triples.items()[:5]: print(k, item) # doctest: +SKIP
    0 [(5, 6), (10, 6)]
    1 [(4, 7), (4, 14), (9, 7)]
    2 [(0, 8), (10, 3), (0, 6)]
    3 [(4, 2), (2, 12), (8, 4)]
    4 [(8, 1), (12, 1), (8, 9)]

    Opening sids2.shp file

    >>> import libpysal
    >>> sids = libpysal.io.open(libpysal.examples.get_path('sids2.shp'),'r') # doctest: +SKIP

    Extracting the centroids of polygons in the sids data

    >>> sids_d = np.array([i.centroid for i in sids]) # doctest: +SKIP

    Creating a 5-nearest neighbors weights from the sids centroids

    >>> sids_w = libpysal.weights.KNN(sids_d,k=5) # doctest: +SKIP

    Ensuring that the members in sids_w are ordered by
    the order of sids_d's ID

    >>> if not sids_w.id_order_set: sids_w.id_order = sids_w.id_order # doctest: +SKIP

    Finding headbaning triples by using 5 nearest neighbors

    >>> s_ht = Headbanging_Triples(sids_d,sids_w,k=5) # doctest: +SKIP

    Checking the members of the found triples

    >>> for k, item in s_ht.triples.items()[:5]: print(k, item) # doctest: +SKIP
    0 [(1, 18), (1, 21), (1, 33)]
    1 [(2, 40), (2, 22), (22, 40)]
    2 [(39, 22), (1, 9), (39, 17)]
    3 [(16, 6), (19, 6), (20, 6)]
    4 [(5, 15), (27, 15), (35, 15)]

    Finding headbanging triples by using 5 nearest neighbors with edge correction

    >>> s_ht2 = Headbanging_Triples(sids_d,sids_w,k=5,edgecor=True) # doctest: +SKIP

    Checking the members of the found triples

    >>> for k, item in s_ht2.triples.items()[:5]: print(k, item) # doctest: +SKIP
    0 [(1, 18), (1, 21), (1, 33)]
    1 [(2, 40), (2, 22), (22, 40)]
    2 [(39, 22), (1, 9), (39, 17)]
    3 [(16, 6), (19, 6), (20, 6)]
    4 [(5, 15), (27, 15), (35, 15)]

    Checking the extrapolated point that is introduced into the triples
    during edge correction

    >>> extrapolated = s_ht2.extra[72] # doctest: +SKIP

    Checking the observation IDs constituting the extrapolated triple

    >>> extrapolated[0] # doctest: +SKIP
    (89, 77)

    Checking the distances between the extraploated point and the observation 89 and 77

    >>> round(extrapolated[1],5), round(extrapolated[2],6) # doctest: +SKIP
    (0.33753, 0.302707)
    """
    def __init__(self, data, w, k=5, t=3, angle=135.0, edgecor=False):
        # NOTE: this unconditional raise makes everything below unreachable;
        # the class is effectively disabled and kept for reference only.
        raise DeprecationWarning('Deprecated')
        if k < 3:
            raise ValueError("w should be NeareastNeighbors instance & the number of neighbors should be more than 3.")
        if not w.id_order_set:
            raise ValueError("w id_order must be set to align with the order of data")
        self.triples, points = {}, {}
        # Map each (index, Point) to a dict of its neighbors' Points.
        for i, pnt in enumerate(data):
            ng = w.neighbor_offsets[i]
            points[(i, Point(pnt))] = dict(list(zip(ng, [Point(d)
                                                         for d in data[ng]])))
        # For each point, find candidate triples: neighbor pairs whose angle
        # at the point exceeds the angle criterion.
        for i, pnt in list(points.keys()):
            ng = points[(i, pnt)]
            tr, tr_dis = {}, []
            for c in comb(list(ng.keys()), 2):
                p2, p3 = ng[c[0]], ng[c[-1]]
                ang = get_angle_between(Ray(pnt, p2), Ray(pnt, p3))
                if ang > angle or (ang < 0.0 and ang + 360 > angle):
                    tr[tuple(c)] = (p2, p3)
            # Keep at most t triples, preferring the pairs whose connecting
            # segment passes closest to the point.
            if len(tr) > t:
                for c in list(tr.keys()):
                    p2, p3 = tr[c]
                    tr_dis.append((get_segment_point_dist(
                        LineSegment(p2, p3), pnt), c))
                tr_dis = sorted(tr_dis)[:t]
                self.triples[i] = [trp for dis, trp in tr_dis]
            else:
                self.triples[i] = list(tr.keys())
        if edgecor:
            # Edge correction: points on the convex hull with no triples get
            # a synthetic triple built from an extrapolated point.
            self.extra = {}
            ps = dict([(p, i) for i, p in list(points.keys())])
            chull = convex_hull(list(ps.keys()))
            chull = [p for p in chull if len(self.triples[ps[p]]) == 0]
            for point in chull:
                key = (ps[point], point)
                ng = points[key]
                ng_dist = [(get_points_dist(point, p) , p) for p in list(ng.values())] if False else [(get_points_dist(point, p), p) for p in list(ng.values())]
                ng_dist_s = sorted(ng_dist, reverse=True)
                extra = None
                # Try neighbors from nearest to farthest until a valid
                # extrapolation geometry is found.
                while extra is None and len(ng_dist_s) > 0:
                    p2 = ng_dist_s.pop()[-1]
                    p3s = list(ng.values())
                    p3s.remove(p2)
                    for p3 in p3s:
                        dist_p2_p3 = get_points_dist(p2, p3)
                        dist_p_p2 = get_points_dist(point, p2)
                        dist_p_p3 = get_points_dist(point, p3)
                        # Anchor the rays at whichever neighbor is farther
                        # from the edge point.
                        if dist_p_p2 <= dist_p_p3:
                            ray1, ray2, s_pnt, dist, c = Ray(p2, point), Ray(p2, p3), p2, dist_p_p2, (ps[p2], ps[p3])
                        else:
                            ray1, ray2, s_pnt, dist, c = Ray(p3, point), Ray(p3, p2), p3, dist_p_p3, (ps[p3], ps[p2])
                        ang = get_angle_between(ray1, ray2)
                        if ang >= 90 + angle / 2 or (ang < 0 and ang + 360 >= 90 + angle / 2):
                            ex_point = get_point_at_angle_and_dist(
                                ray1, angle, dist)
                            extra = [c, dist_p2_p3, get_points_dist(
                                s_pnt, ex_point)]
                            break
                self.triples[ps[point]].append(extra[0])
                self.extra[ps[point]] = extra
class Headbanging_Median_Rate(object):
    """Headbanging Median Rate Smoothing

    .. deprecated::
        The constructor unconditionally raises ``DeprecationWarning``; the
        class is retained only for reference.

    Parameters
    ----------
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units
    t : Headbanging_Triples instance
    aw : array (n, 1)
        auxilliary weight variable measured across n spatial units
    iteration : integer
        the number of iterations

    Attributes
    ----------
    r : array (n, 1)
        rate values from headbanging median smoothing

    Examples
    --------
    >>> import libpysal # doctest: +SKIP

    opening the sids2 shapefile

    >>> sids = libpysal.io.open(libpysal.examples.get_path('sids2.shp'), 'r') # doctest: +SKIP

    extracting the centroids of polygons in the sids2 data

    >>> sids_d = np.array([i.centroid for i in sids]) # doctest: +SKIP

    creating a 5-nearest neighbors weights from the centroids

    >>> sids_w = libpysal.weights.KNN(sids_d,k=5) # doctest: +SKIP

    ensuring that the members in sids_w are ordered

    >>> if not sids_w.id_order_set: sids_w.id_order = sids_w.id_order # doctest: +SKIP

    finding headbanging triples by using 5 neighbors

    >>> s_ht = Headbanging_Triples(sids_d,sids_w,k=5) # doctest: +SKIP

    reading in the sids2 data table

    >>> sids_db = libpysal.io.open(libpysal.examples.get_path('sids2.dbf'), 'r') # doctest: +SKIP

    extracting the 10th and 9th columns in the sids2.dbf and
    using data values as event and population-at-risk variables

    >>> s_e, s_b = np.array(sids_db[:,9]), np.array(sids_db[:,8]) # doctest: +SKIP

    computing headbanging median rates from s_e, s_b, and s_ht

    >>> sids_hb_r = Headbanging_Median_Rate(s_e,s_b,s_ht) # doctest: +SKIP

    extracting the computed rates through the property r of the Headbanging_Median_Rate instance

    >>> sids_hb_r.r[:5] # doctest: +SKIP
    array([ 0.00075586,  0.        ,  0.0008285 ,  0.0018315 ,  0.00498891])

    recomputing headbanging median rates with 5 iterations

    >>> sids_hb_r2 = Headbanging_Median_Rate(s_e,s_b,s_ht,iteration=5) # doctest: +SKIP

    extracting the computed rates through the property r of the Headbanging_Median_Rate instance

    >>> sids_hb_r2.r[:5] # doctest: +SKIP
    array([ 0.0008285 ,  0.00084331,  0.00086896,  0.0018315 ,  0.00498891])

    recomputing headbanging median rates by considring a set of auxilliary weights

    >>> sids_hb_r3 = Headbanging_Median_Rate(s_e,s_b,s_ht,aw=s_b) # doctest: +SKIP

    extracting the computed rates through the property r of the Headbanging_Median_Rate instance

    >>> sids_hb_r3.r[:5] # doctest: +SKIP
    array([ 0.00091659,  0.        ,  0.00156838,  0.0018315 ,  0.00498891])
    """
    def __init__(self, e, b, t, aw=None, iteration=1):
        # NOTE: this unconditional raise makes everything below unreachable;
        # the class is effectively disabled and kept for reference only.
        raise DeprecationWarning('Deprecated')
        self.r = e * 1.0 / b
        self.tr, self.aw = t.triples, aw
        if hasattr(t, 'extra'):
            self.extra = t.extra
        while iteration:
            self.__search_headbanging_median()
            iteration -= 1

    def __get_screens(self, id, triples, weighted=False):
        # Compute the screening values (lower/upper medians over the triples)
        # used to decide whether the observed rate at `id` is retained.
        r, tr = self.r, self.tr
        if len(triples) == 0:
            return r[id]
        if hasattr(self, 'extra') and id in self.extra:
            # Edge-corrected case: one triple with an extrapolated member.
            extra = self.extra
            trp_r = r[list(triples[0])]
            # observed rate
            # plus difference in rate scaled by ratio of extrapolated distance
            # & observed distance.
            trp_r[-1] = trp_r[0] + (trp_r[0] - trp_r[-1]) * (
                extra[id][-1] * 1.0 / extra[id][1])
            trp_r = sorted(trp_r)
            if not weighted:
                return r[id], trp_r[0], trp_r[-1]
            else:
                trp_aw = self.aw[triples[0]]
                # Extrapolate the auxiliary weight with the same scaling.
                extra_w = trp_aw[0] + (trp_aw[0] - trp_aw[-1]) * (extra[id][-1] * 1.0 / extra[id][1])
                return r[id], trp_r[0], trp_r[-1], self.aw[id], trp_aw[0] + extra_w
        if not weighted:
            # Unweighted screens: medians of the per-triple min and max rates.
            lowest, highest = [], []
            for trp in triples:
                trp_r = np.sort(r[list(trp)])
                lowest.append(trp_r[0])
                highest.append(trp_r[-1])
            return r[id], np.median(np.array(lowest)), np.median(np.array(highest))
        if weighted:
            # Weighted screens: weighted medians of the per-triple extremes,
            # using the auxiliary weights of the corresponding observations.
            lowest, highest = [], []
            lowest_aw, highest_aw = [], []
            for trp in triples:
                trp_r = r[list(trp)]
                dtype = [('r', '%s' % trp_r.dtype), ('w',
                                                     '%s' % self.aw.dtype)]
                trp_r = np.array(list(zip(trp_r, list(trp))), dtype=dtype)
                trp_r.sort(order='r')
                lowest.append(trp_r['r'][0])
                highest.append(trp_r['r'][-1])
                lowest_aw.append(self.aw[int(round(trp_r['w'][0]))])
                highest_aw.append(self.aw[int(round(trp_r['w'][-1]))])
            wm_lowest = weighted_median(np.array(lowest), np.array(lowest_aw))
            wm_highest = weighted_median(
                np.array(highest), np.array(highest_aw))
            triple_members = flatten(triples, unique=False)
            return r[id], wm_lowest, wm_highest, self.aw[id] * len(triples), self.aw[triple_members].sum()

    def __get_median_from_screens(self, screens):
        # Turn the screening tuple into a single smoothed rate.
        if isinstance(screens, float):
            # No triples: observed rate passes through unchanged.
            return screens
        elif len(screens) == 3:
            # Unweighted: median of (observed, lower screen, upper screen).
            return np.median(np.array(screens))
        elif len(screens) == 5:
            # Weighted: keep the observed rate inside the screen interval,
            # clamping to a screen only when the weight comparison allows it.
            rk, wm_lowest, wm_highest, w1, w2 = screens
            if rk >= wm_lowest and rk <= wm_highest:
                return rk
            elif rk < wm_lowest and w1 < w2:
                return wm_lowest
            elif rk > wm_highest and w1 < w2:
                return wm_highest
            else:
                return rk

    def __search_headbanging_median(self):
        # One smoothing pass over all observations.
        r, tr = self.r, self.tr
        new_r = []
        for k in list(tr.keys()):
            screens = self.__get_screens(
                k, tr[k], weighted=(self.aw is not None))
            new_r.append(self.__get_median_from_screens(screens))
        self.r = np.array(new_r)

    @_requires('pandas')
    @classmethod
    def by_col(cls, df, e, b, t=None, geom_col='geometry', inplace=False, **kwargs):
        """
        Compute smoothing by columns in a dataframe. The bounding box and point
        information is computed from the geometry column.

        Parameters
        -----------
        df : pandas.DataFrame
            a dataframe containing the data to be smoothed
        e : string or list of strings
            the name or names of columns containing event variables to be
            smoothed
        b : string or list of strings
            the name or names of columns containing the population
            variables to be smoothed
        t : Headbanging_Triples instance or list of Headbanging_Triples
            list of headbanging triples instances. If not provided, this
            is computed from the geometry column of the dataframe.
        geom_col: string
            the name of the column in the dataframe containing the
            geometry information.
        inplace : bool
            a flag denoting whether to output a copy of `df` with the
            relevant smoothed columns appended, or to append the columns
            directly to `df` itself.
        **kwargs: optional keyword arguments
            optional keyword options that are passed directly to the
            smoother.

        Returns
        ---------
        a new dataframe containing the smoothed Headbanging Median Rates for the
        event/population pairs. If done inplace, there is no return value and
        `df` is modified in place.
        """
        import pandas as pd
        if not inplace:
            # Work on a copy and recurse with inplace=True.
            new = df.copy()
            cls.by_col(new, e, b, t=t, geom_col=geom_col, inplace=True, **kwargs)
            return new
        import pandas as pd
        # prep for application over multiple event/population pairs
        if isinstance(e, str):
            e = [e]
        if isinstance(b, str):
            b = [b]
        if len(e) > len(b):
            b = b * len(e)
        data = get_points_array(df[geom_col])
        #Headbanging_Triples doesn't take **kwargs, so filter its arguments
        # (self, data, w, k=5, t=3, angle=135.0, edgecor=False):
        w = kwargs.pop('w', None)
        if w is None:
            found = False
            for k in df._metadata:
                # NOTE(review): `.get(w, None)` looks like it should be
                # `.get(k, None)` — as written the lookup key is always None,
                # and the loop has no `break`; confirm against upstream.
                w = df.__dict__.get(w, None)
                if isinstance(w, W):
                    found = True
            if not found:
                raise Exception('Weights not provided and no weights attached to frame!'
                                ' Please provide a weight or attach a weight to the'
                                ' dataframe')
        k = kwargs.pop('k', 5)
        t = kwargs.pop('t', 3)
        angle = kwargs.pop('angle', 135.0)
        edgecor = kwargs.pop('edgecor', False)
        # Build the triples from the geometry (raises DeprecationWarning).
        hbt = Headbanging_Triples(data, w, k=k, t=t, angle=angle,
                                  edgecor=edgecor)
        res = []
        for ename, bname in zip(e, b):
            r = cls(df[ename], df[bname], hbt, **kwargs).r
            name = '_'.join(('-'.join((ename, bname)), cls.__name__.lower()))
            df[name] = r
|
def standardized_mortality_ratio(e, b, s_e, s_b, n):
    """A utility function to compute standardized mortality ratio (SMR).

    Parameters
    ----------
    e : array
        (n*h, 1), event variable measured for each age group across n spatial units
    b : array
        (n*h, 1), population at risk variable measured for each age group across n spatial units
    s_e : array
        (n*h, 1), event variable measured for each age group across n spatial units in a standard population
    s_b : array
        (n*h, 1), population variable measured for each age group across n spatial units in a standard population
    n : integer
        the number of spatial units

    Notes
    -----
    e, b, s_e, and s_b are arranged in the same order

    Returns
    -------
    array
        (nx1)

    Examples
    --------
    >>> e = np.array([30, 25, 25, 15, 33, 21, 30, 20])
    >>> b = np.array([100, 100, 110, 90, 100, 90, 110, 90])
    >>> s_e = np.array([100, 45, 120, 100, 50, 30, 200, 80])
    >>> s_b = np.array([1000, 900, 1000, 900, 1000, 900, 1000, 900])
    >>> n = 2
    >>> a, b = standardized_mortality_ratio(e, b, s_e, s_b, n)
    >>> round(a, 4)
    2.4869
    >>> round(b, 4)
    2.7368
    """
    # age-specific rates in the standard population
    s_r = s_e * 1.0 / s_b
    # observed events per spatial unit
    e_by_n = sum_by_n(e, 1.0, n)
    # expected events per unit: standard rates applied to the local population
    expected = sum_by_n(b, s_r, n)
    smr = e_by_n * 1.0 / expected
    return smr
"def sum_by_n(d, w, n):\n \"\"\"A utility function to summarize a data array into n values\n after weighting the array with another weight array w\n\n Parameters\n ----------\n d : array\n (t, 1), numerical values\n w : array\n (t, 1), numerical... | from __future__ import division
"""
Apply smoothing to rate computation
[Longer Description]
Author(s):
Myunghwa Hwang mhwang4@gmail.com
David Folch dfolch@asu.edu
Luc Anselin luc.anselin@asu.edu
Serge Rey srey@asu.edu
"""
__author__ = "Myunghwa Hwang <mhwang4@gmail.com>, David Folch <dfolch@asu.edu>, Luc Anselin <luc.anselin@asu.edu>, Serge Rey <srey@asu.edu"
from libpysal.weights.weights import W
from libpysal.weights.distance import Kernel
from libpysal.weights.util import get_points_array, comb
from libpysal.cg import Point, Ray, LineSegment
from libpysal.cg import get_angle_between, get_points_dist, get_segment_point_dist,\
get_point_at_angle_and_dist, convex_hull, get_bounding_box
from libpysal.common import np, KDTree, requires as _requires
from libpysal.weights.spatial_lag import lag_spatial as slag
from scipy.stats import gamma, norm, chi2, poisson
from functools import reduce
import doctest
__all__ = ['Excess_Risk', 'Empirical_Bayes', 'Spatial_Empirical_Bayes', 'Spatial_Rate', 'Kernel_Smoother', 'Age_Adjusted_Smoother', 'Disk_Smoother', 'Spatial_Median_Rate', 'Spatial_Filtering', 'Headbanging_Triples', 'Headbanging_Median_Rate', 'flatten', 'weighted_median', 'sum_by_n', 'crude_age_standardization', 'direct_age_standardization', 'indirect_age_standardization', 'standardized_mortality_ratio', 'choynowski', 'assuncao_rate']
def flatten(l, unique=True):
    """flatten a list of lists

    Parameters
    ----------
    l : list
        of lists (or other iterables of items)
    unique : boolean
        whether or not only unique items are wanted (default=True).
        When True, the order of the returned items is unspecified.

    Returns
    -------
    list
        of single items

    Examples
    --------
    >>> l = [[1, 2], [3, 4, ], [5, 6]]
    >>> flatten(l)
    [1, 2, 3, 4, 5, 6]
    """
    # Accumulate with list.extend instead of reduce(+): identical results for
    # non-empty input, and fixes a crash on empty input (reduce() without an
    # initializer raises TypeError on an empty sequence).
    flat = []
    for sub in l:
        flat.extend(sub)
    if not unique:
        return list(flat)
    return list(set(flat))
def weighted_median(d, w):
    """Return the weighted median of ``d`` using weights ``w``.

    Parameters
    ----------
    d : array
        (n, 1), values whose median is sought
    w : array
        (n, 1), weights attached to the corresponding entries of d

    Notes
    -----
    d and w are arranged in the same order. When the cumulative weight splits
    exactly at the median position, the two straddling values are averaged.

    Returns
    -------
    float
        weighted median of d

    Examples
    --------
    >>> d = np.array([5, 4, 3, 1, 2])
    >>> w = np.array([10, 22, 9, 2, 5])
    >>> weighted_median(d, w)
    4
    """
    # sort values (stable) and accumulate the weights in that order
    order = np.argsort(d, kind='stable')
    cum_w = w[order].cumsum()
    half = cum_w[-1] * 1.0 / 2
    # first position where the cumulative weight reaches half the total
    idx = int(np.searchsorted(cum_w, half, side='left'))
    sorted_d = np.sort(d)
    # exact split at the boundary: average the two straddling values
    if cum_w[idx] == half and len(d) - 1 > idx:
        return sorted_d[idx:idx + 2].mean()
    return sorted_d[idx]
def sum_by_n(d, w, n):
    """Sum a weighted array into n group totals.

    Parameters
    ----------
    d : array
        (t, 1), numerical values
    w : array or float
        (t, 1), weights (or a scalar weight) applied to d before summing
    n : integer
        the number of groups; t = c*n (c is a constant)

    Returns
    -------
    : array
        (n, 1), an array with summarized values

    Examples
    --------
    >>> sum_by_n(np.array([10, 9, 20, 30]), np.array([0.5, 0.1, 0.3, 0.8]), 2)
    array([ 5.9, 30. ])
    """
    total = len(d)
    # group length; integer division, as t is documented to be a multiple of n
    size = total // n
    weighted = d * w
    return np.array([weighted[start:start + size].sum()
                     for start in range(0, total, size)])
def crude_age_standardization(e, b, n):
    """Compute age-standardized rates through crude age standardization.

    Parameters
    ----------
    e : array
        (n*h, 1), event variable measured for each age group across n spatial units
    b : array
        (n*h, 1), population at risk variable measured for each age group across n spatial units
    n : integer
        the number of spatial units

    Notes
    -----
    e and b are arranged in the same order

    Returns
    -------
    : array
        (n, 1), age standardized rate

    Examples
    --------
    >>> e = np.array([30, 25, 25, 15, 33, 21, 30, 20])
    >>> b = np.array([100, 100, 110, 90, 100, 90, 110, 90])
    >>> crude_age_standardization(e, b, 2)
    array([0.2375    , 0.26666667])
    """
    # age-specific rates
    rates = e * 1.0 / b
    # total population of each spatial unit
    unit_pop = sum_by_n(b, 1.0, n)
    # each age group's share of its own unit's population
    shares = b * 1.0 / unit_pop.repeat(len(e) // n)
    # population-share weighted sum of the age-specific rates, per unit
    return sum_by_n(rates, shares, n)
def direct_age_standardization(e, b, s, n, alpha=0.05):
    """A utility function to compute rate through direct age standardization.

    Parameters
    ----------
    e : array
        (n*h, 1), event variable measured for each age group across n spatial units
    b : array
        (n*h, 1), population at risk variable measured for each age group across n spatial units
    s : array
        (n*h, 1), standard population for each age group across n spatial units
    n : integer
        the number of spatial units
    alpha : float
        significance level for confidence interval

    Notes
    -----
    e, b, and s are arranged in the same order.

    Returns
    -------
    list
        a list of n tuples; each tuple has a rate and its lower and upper
        confidence limits

    Examples
    --------
    >>> e = np.array([30, 25, 25, 15, 33, 21, 30, 20])
    >>> b = np.array([1000, 1000, 1100, 900, 1000, 900, 1100, 900])
    >>> s = np.array([1000, 900, 1000, 900, 1000, 900, 1000, 900])
    >>> n = 2
    >>> a, b = [i[0] for i in direct_age_standardization(e, b, s, n)]
    >>> round(a, 4)
    0.0237
    >>> round(b, 4)
    0.0267
    """
    # weight per age group: standard-population share divided by local population
    age_weight = (1.0 / b) * (s * 1.0 / sum_by_n(s, 1.0, n).repeat(len(s) // n))
    # directly standardized rate per spatial unit
    adjusted_r = sum_by_n(e, age_weight, n)
    # variance estimate: events weighted by squared age weights
    var_estimate = sum_by_n(e, np.square(age_weight), n)
    # gamma-distribution parameters for the confidence limits
    g_a = np.square(adjusted_r) / var_estimate
    g_b = var_estimate / adjusted_r
    # k: maximum age weight within each unit, used to widen the upper limit
    k = [age_weight[i:i + len(b) // n].max() for i in range(0, len(b),
                                                            len(b) // n)]
    g_a_k = np.square(adjusted_r + k) / (var_estimate + np.square(k))
    g_b_k = (var_estimate + np.square(k)) / (adjusted_r + k)
    # NOTE(review): summed_b is computed but never used below — confirm intent.
    summed_b = sum_by_n(b, 1.0, n)
    res = []
    for i in range(len(adjusted_r)):
        if adjusted_r[i] == 0:
            # NOTE(review): chi2.ppf requires a degrees-of-freedom argument;
            # as written this branch raises TypeError whenever a unit's
            # adjusted rate is zero. The intended call is presumably
            # chi2.ppf(1 - 0.5 * alpha, df) for some df (df=2 for the exact
            # Poisson zero-count limit) — confirm upstream before fixing.
            upper = 0.5 * chi2.ppf(1 - 0.5 * alpha)
            lower = 0.0
        else:
            lower = gamma.ppf(0.5 * alpha, g_a[i], scale=g_b[i])
            upper = gamma.ppf(1 - 0.5 * alpha, g_a_k[i], scale=g_b_k[i])
        res.append((adjusted_r[i], lower, upper))
    return res
def indirect_age_standardization(e, b, s_e, s_b, n, alpha=0.05):
    """Compute rates through indirect age standardization.

    Parameters
    ----------
    e : array
        (n*h, 1), event variable measured for each age group across n spatial units
    b : array
        (n*h, 1), population at risk variable measured for each age group across n spatial units
    s_e : array
        (n*h, 1), event variable measured for each age group across n spatial units in a standard population
    s_b : array
        (n*h, 1), population variable measured for each age group across n spatial units in a standard population
    n : integer
        the number of spatial units
    alpha : float
        significance level for confidence interval

    Notes
    -----
    e, b, s_e, and s_b are arranged in the same order.

    Returns
    -------
    list
        a list of n tuples; each tuple has a rate and its lower and upper
        confidence limits

    Examples
    --------
    >>> e = np.array([30, 25, 25, 15, 33, 21, 30, 20])
    >>> b = np.array([100, 100, 110, 90, 100, 90, 110, 90])
    >>> s_e = np.array([100, 45, 120, 100, 50, 30, 200, 80])
    >>> s_b = np.array([1000, 900, 1000, 900, 1000, 900, 1000, 900])
    >>> [i[0] for i in indirect_age_standardization(e, b, s_e, s_b, 2)]
    [0.23723821989528798, 0.2610803324099723]
    """
    smr = standardized_mortality_ratio(e, b, s_e, s_b, n)
    # overall rate in the standard population
    overall_rate = sum(s_e * 1.0) / sum(s_b * 1.0)
    adjusted = overall_rate * smr
    # standard error of log(SMR) is 1/sqrt(observed events per unit)
    observed = sum_by_n(e, 1.0, n)
    se_log = 1.0 / np.sqrt(observed)
    z = norm.ppf(1 - 0.5 * alpha)
    ln_smr = np.log(smr)
    # back-transform the log-scale limits and rescale by the overall rate
    lower = np.exp(ln_smr - z * se_log) * overall_rate
    upper = np.exp(ln_smr + z * se_log) * overall_rate
    return list(zip(adjusted, lower, upper))
def choynowski(e, b, n, threshold=None):
    """Choynowski map probabilities [Choynowski1959]_ .

    Parameters
    ----------
    e : array(n*h, 1)
        event variable measured for each age group across n spatial units
    b : array(n*h, 1)
        population at risk variable measured for each age group across n spatial units
    n : integer
        the number of spatial units
    threshold : float
        Returns zero for any p-value greater than threshold

    Notes
    -----
    e and b are arranged in the same order.

    Returns
    -------
    : array (nx1)

    Examples
    --------
    >>> e = np.array([30, 25, 25, 15, 33, 21, 30, 20])
    >>> b = np.array([100, 100, 110, 90, 100, 90, 110, 90])
    >>> a, b = choynowski(e, b, 2)
    >>> round(a, 3)
    0.304
    >>> round(b, 3)
    0.294
    """
    obs = sum_by_n(e, 1.0, n)
    pop = sum_by_n(b, 1.0, n)
    # overall rate pooled across all units
    overall_rate = sum(obs) * 1.0 / sum(pop)
    expected = overall_rate * pop
    probs = []
    # two-sided-style Poisson tail probability per unit
    for count, exp_count in zip(obs, expected):
        if count <= exp_count:
            probs.append(poisson.cdf(count, exp_count))
        else:
            probs.append(1 - poisson.cdf(count - 1, exp_count))
    if threshold:
        # zero out any probability at or above the threshold
        probs = [pr if pr < threshold else 0.0 for pr in probs]
    return np.array(probs)
def assuncao_rate(e, b):
    """Standardize rates using the mean and standard deviation of
    Empirical Bayes rate estimates.

    The standardized rates resulting from this function are used to compute
    Moran's I corrected for rate variables.

    Parameters
    ----------
    e : array(n, 1)
        event variable measured at n spatial units
    b : array(n, 1)
        population at risk variable measured at n spatial units

    Notes
    -----
    e and b are arranged in the same order.

    Returns
    -------
    : array (nx1)

    Examples
    --------
    >>> e = np.array([30, 25, 25, 15, 33, 21, 30, 20])
    >>> b = np.array([100, 100, 110, 90, 100, 90, 110, 90])
    >>> assuncao_rate(e, b)[:4]
    array([ 1.03843594, -0.04099089, -0.56250375, -1.73061861])
    """
    raw = e * 1.0 / b
    total_e = sum(e)
    total_b = sum(b)
    # prior mean: the overall pooled rate
    prior_mean = total_e * 1.0 / total_b
    # population-weighted variance of the raw rates around the prior mean
    var_term = sum(b * ((raw - prior_mean) ** 2)) / total_b
    # EB prior variance, corrected by the average population size
    prior_var = var_term - prior_mean / (float(total_b) / len(e))
    est_var = prior_var + prior_mean / b
    return (raw - prior_mean) / np.sqrt(est_var)
class _Smoother(object):
"""
This is a helper class that implements things that all smoothers should do.
Right now, the only thing that we need to propagate is the by_col function.
TBQH, most of these smoothers should be functions, not classes (aside from
maybe headbanging triples), since they're literally only inits + one
attribute.
"""
def __init__(self):
pass
@classmethod
def by_col(cls, df, e,b, inplace=False, **kwargs):
"""
Compute smoothing by columns in a dataframe.
Parameters
-----------
df : pandas.DataFrame
a dataframe containing the data to be smoothed
e : string or list of strings
the name or names of columns containing event variables to be
smoothed
b : string or list of strings
the name or names of columns containing the population
variables to be smoothed
inplace : bool
a flag denoting whether to output a copy of `df` with the
relevant smoothed columns appended, or to append the columns
directly to `df` itself.
**kwargs: optional keyword arguments
optional keyword options that are passed directly to the
smoother.
Returns
---------
a copy of `df` containing the columns. Or, if `inplace`, this returns
None, but implicitly adds columns to `df`.
"""
if not inplace:
new = df.copy()
cls.by_col(new, e, b, inplace=True, **kwargs)
return new
if isinstance(e, str):
e = [e]
if isinstance(b, str):
b = [b]
if len(b) == 1 and len(e) > 1:
b = b * len(e)
try:
assert len(e) == len(b)
except AssertionError:
raise ValueError('There is no one-to-one mapping between event'
' variable and population at risk variable!')
for ei, bi in zip(e,b):
ename = ei
bname = bi
ei = df[ename]
bi = df[bname]
outcol = '_'.join(('-'.join((ename, bname)), cls.__name__.lower()))
df[outcol] = cls(ei, bi, **kwargs).r
class Excess_Risk(_Smoother):
    """Excess Risk: each unit's raw rate divided by the overall rate.

    Parameters
    ----------
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units

    Attributes
    ----------
    r : array (n, 1)
        excess risk values

    Examples
    --------
    >>> import libpysal
    >>> stl = libpysal.io.open(libpysal.examples.get_path('stl_hom.csv'), 'r')
    >>> stl_e, stl_b = np.array(stl[:, 10]), np.array(stl[:, 13])
    >>> er = Excess_Risk(stl_e, stl_b)
    >>> float(round(er.r[0][0], 8))
    0.20665681
    """
    def __init__(self, e, b):
        events = np.asarray(e).reshape(-1, 1)
        at_risk = np.asarray(b).reshape(-1, 1)
        # overall (pooled) rate across all units
        overall = events.sum() * 1.0 / at_risk.sum()
        # ratio of each unit's raw rate to the overall rate
        self.r = events * 1.0 / (at_risk * overall)
class Empirical_Bayes(_Smoother):
    """Aspatial Empirical Bayes Smoothing: raw rates are shrunk toward the
    overall rate, with shrinkage governed by the estimated prior variance.

    Parameters
    ----------
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units

    Attributes
    ----------
    r : array (n, 1)
        rate values from Empirical Bayes Smoothing

    Examples
    --------
    >>> import libpysal
    >>> stl = libpysal.io.open(libpysal.examples.get_path('stl_hom.csv'), 'r')
    >>> stl_e, stl_b = np.array(stl[:, 10]), np.array(stl[:, 13])
    >>> eb = Empirical_Bayes(stl_e, stl_b)
    >>> float(round(eb.r[0][0], 10))
    2.36719e-05
    """
    def __init__(self, e, b):
        events = np.asarray(e).reshape(-1, 1)
        pop = np.asarray(b).reshape(-1, 1)
        total_e = events.sum() * 1.0
        total_b = pop.sum() * 1.0
        # prior mean: the overall pooled rate
        prior_mean = total_e / total_b
        raw = events * 1.0 / pop
        dev = raw - prior_mean
        # population-weighted variance of the raw rates
        weighted_var = (pop * dev * dev).sum() * 1.0 / total_b
        # prior variance, corrected by the mean population size
        prior_var = weighted_var - prior_mean * 1.0 / pop.mean()
        # shrinkage factor: large populations keep more of their raw rate
        shrink = prior_var / (prior_var + prior_mean / pop)
        self.r = shrink * raw + (1.0 - shrink) * prior_mean
class _Spatial_Smoother(_Smoother):
    """
    Helper base class for spatial smoothers. Extends the aspatial helper with
    a ``by_col`` that resolves and forwards a spatial weights object.
    """
    def __init__(self):
        pass

    @classmethod
    def by_col(cls, df, e, b, w=None, inplace=False, **kwargs):
        """
        Compute smoothing by columns in a dataframe.

        Parameters
        -----------
        df : pandas.DataFrame
            a dataframe containing the data to be smoothed
        e : string or list of strings
            the name or names of columns containing event variables to be
            smoothed
        b : string or list of strings
            the name or names of columns containing the population
            variables to be smoothed
        w : pysal.weights.W or list of pysal.weights.W
            the spatial weights object or objects to use with the
            event-population pairs. If not provided and a weights object
            is in the dataframe's metadata, that weights object will be
            used.
        inplace : bool
            a flag denoting whether to output a copy of `df` with the
            relevant smoothed columns appended, or to append the columns
            directly to `df` itself.
        **kwargs : optional keyword arguments
            optional keyword options that are passed directly to the
            smoother.

        Returns
        ---------
        a copy of `df` containing the new columns, or None (with columns
        appended to `df`) when `inplace` is True.
        """
        if not inplace:
            new = df.copy()
            cls.by_col(new, e, b, w=w, inplace=True, **kwargs)
            return new
        if isinstance(e, str):
            e = [e]
        if isinstance(b, str):
            b = [b]
        if w is None:
            found = False
            for k in df._metadata:
                # BUG FIX: the lookup key must be the metadata name `k`;
                # previously `w` (always None here) was used as the key, so a
                # weights object attached to the frame could never be found.
                # Also break at the first match (as the sibling
                # Age_Adjusted_Smoother.by_col does) instead of letting later
                # iterations clobber the found weights.
                w = df.__dict__.get(k, None)
                if isinstance(w, W):
                    found = True
                    break
            if not found:
                raise Exception('Weights not provided and no weights attached to frame!'
                                ' Please provide a weight or attach a weight to the'
                                ' dataframe')
        # a single weights object serves every event/population pair
        if isinstance(w, W):
            w = [w] * len(e)
        if len(b) == 1 and len(e) > 1:
            b = b * len(e)
        try:
            assert len(e) == len(b)
        except AssertionError:
            raise ValueError('There is no one-to-one mapping between event'
                             ' variable and population at risk variable!')
        for ei, bi, wi in zip(e, b, w):
            ename = ei
            bname = bi
            ei = df[ename]
            bi = df[bname]
            outcol = '_'.join(('-'.join((ename, bname)), cls.__name__.lower()))
            df[outcol] = cls(ei, bi, w=wi, **kwargs).r
class Spatial_Empirical_Bayes(_Spatial_Smoother):
    """Spatial Empirical Bayes Smoothing: raw rates are shrunk toward a
    locally (neighborhood) smoothed reference rate rather than the global one.

    Parameters
    ----------
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units
    w : spatial weights instance

    Attributes
    ----------
    r : array (n, 1)
        rate values from Spatial Empirical Bayes Smoothing

    Examples
    --------
    >>> import libpysal
    >>> stl = libpysal.io.open(libpysal.examples.get_path('stl_hom.csv'), 'r')
    >>> stl_e, stl_b = np.array(stl[:, 10]), np.array(stl[:, 13])
    >>> stl_w = libpysal.io.open(libpysal.examples.get_path('stl.gal'), 'r').read()
    >>> if not stl_w.id_order_set: stl_w.id_order = range(1, len(stl) + 1)
    >>> s_eb = Spatial_Empirical_Bayes(stl_e, stl_b, stl_w)
    >>> float(round(s_eb.r[0][0], 10))
    4.01486e-05
    """
    def __init__(self, e, b, w):
        # NOTE(review): message typo ("e an b") kept intact — it is a runtime
        # string and changing it would alter observable behavior.
        if not w.id_order_set:
            raise ValueError("w id_order must be set to align with the order of e an b")
        e = np.asarray(e).reshape(-1,1)
        b = np.asarray(b).reshape(-1,1)
        # local reference rate per unit from spatial rate smoothing
        r_mean = Spatial_Rate(e, b, w).r
        rate = e * 1.0 / b
        r_var_left = np.ones_like(e) * 1.
        ngh_num = np.ones_like(e)
        # total population over each unit's neighborhood, including itself
        bi = slag(w, b) + b
        for i, idv in enumerate(w.id_order):
            # neighborhood of idv, including the unit itself
            ngh = list(w[idv].keys()) + [idv]
            nghi = [w.id2i[k] for k in ngh]
            ngh_num[i] = len(nghi)
            # population-weighted squared deviation from the local mean rate
            v = sum(np.square(rate[nghi] - r_mean[i]) * b[nghi])
            r_var_left[i] = v
        r_var_left = r_var_left / bi
        r_var_right = r_mean / (bi / ngh_num)
        r_var = r_var_left - r_var_right
        # the variance estimate cannot be negative; clamp at zero
        r_var[r_var < 0] = 0.0
        # shrink raw rates toward the local mean by the EB weight
        self.r = r_mean + (rate - r_mean) * (r_var / (r_var + (r_mean / b)))
class Spatial_Rate(_Spatial_Smoother):
    """Spatial Rate Smoothing: each unit's rate is the pooled rate of the
    unit together with its neighbors (binary-weighted neighborhood sums).

    Parameters
    ----------
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units
    w : spatial weights instance

    Attributes
    ----------
    r : array (n, 1)
        rate values from spatial rate smoothing

    Examples
    --------
    >>> import libpysal
    >>> stl = libpysal.io.open(libpysal.examples.get_path('stl_hom.csv'), 'r')
    >>> stl_e, stl_b = np.array(stl[:, 10]), np.array(stl[:, 13])
    >>> stl_w = libpysal.io.open(libpysal.examples.get_path('stl.gal'), 'r').read()
    >>> if not stl_w.id_order_set: stl_w.id_order = range(1, len(stl) + 1)
    >>> sr = Spatial_Rate(stl_e, stl_b, stl_w)
    >>> float(round(sr.r[0][0], 10))
    4.59326e-05
    """
    def __init__(self, e, b, w):
        if not w.id_order_set:
            raise ValueError("w id_order must be set to align with the order of e and b")
        events = np.asarray(e).reshape(-1, 1)
        pop = np.asarray(b).reshape(-1, 1)
        # binary weights make the spatial lag a neighborhood sum
        w.transform = 'b'
        lag_e = slag(w, events)
        lag_b = slag(w, pop)
        # pooled rate over the unit and its neighbors
        self.r = (events + lag_e) / (pop + lag_b)
        # restore the original weights transformation
        w.transform = 'o'
class Kernel_Smoother(_Spatial_Smoother):
    """Kernel smoothing: rates are ratios of kernel-weighted event and
    population sums.

    Parameters
    ----------
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units
    w : Kernel weights instance

    Attributes
    ----------
    r : array (n, 1)
        rate values from kernel smoothing

    Raises
    ------
    ValueError
        if `w` is not a Kernel weights instance, or if its id_order is unset

    Examples
    --------
    >>> e = np.array([10, 1, 3, 4, 2, 5])
    >>> b = np.array([100, 15, 20, 20, 80, 90])
    >>> points = [(10, 10), (20, 10), (40, 10), (15, 20), (30, 20), (30, 30)]
    >>> kw = Kernel(points)
    >>> if not kw.id_order_set: kw.id_order = range(0, len(points))
    >>> kr = Kernel_Smoother(e, b, kw)
    >>> float(round(kr.r[0][0], 8))
    0.10543301
    """
    def __init__(self, e, b, w):
        # BUG FIX: the original raised the undefined name `Error`, which
        # produced a NameError instead of the intended exception; raise
        # ValueError (matching the id_order check below). isinstance also
        # accepts Kernel subclasses, unlike the previous exact type check.
        if not isinstance(w, Kernel):
            raise ValueError('w must be an instance of Kernel weights')
        if not w.id_order_set:
            raise ValueError("w id_order must be set to align with the order of e and b")
        e = np.asarray(e).reshape(-1, 1)
        b = np.asarray(b).reshape(-1, 1)
        # kernel-weighted sums of events and populations
        w_e, w_b = slag(w, e), slag(w, b)
        self.r = w_e / w_b
class Age_Adjusted_Smoother(_Spatial_Smoother):
"""Age-adjusted rate smoothing
Parameters
----------
e : array (n*h, 1)
event variable measured for each age group across n spatial units
b : array (n*h, 1)
population at risk variable measured for each age group across n spatial units
w : spatial weights instance
s : array (n*h, 1)
standard population for each age group across n spatial units
Attributes
----------
r : array (n, 1)
rate values from spatial rate smoothing
Notes
-----
Weights used to smooth age-specific events and populations are simple binary weights
Examples
--------
Creating an array including 12 values for the 6 regions with 2 age groups
>>> e = np.array([10, 8, 1, 4, 3, 5, 4, 3, 2, 1, 5, 3])
Creating another array including 12 population-at-risk values for the 6 regions
>>> b = np.array([100, 90, 15, 30, 25, 20, 30, 20, 80, 80, 90, 60])
For age adjustment, we need another array of values containing standard population
s includes standard population data for the 6 regions
>>> s = np.array([98, 88, 15, 29, 20, 23, 33, 25, 76, 80, 89, 66])
Creating a list containing geographic coordinates of the 6 regions' centroids
>>> points=[(10, 10), (20, 10), (40, 10), (15, 20), (30, 20), (30, 30)]
Creating a kernel-based spatial weights instance by using the above points
>>> kw=Kernel(points)
Ensuring that the elements in the kernel-based weights are ordered
by the given sequential numbers from 0 to 5
>>> if not kw.id_order_set: kw.id_order = range(0,len(points))
Applying age-adjusted smoothing to e and b
>>> ar = Age_Adjusted_Smoother(e, b, kw, s)
Extracting the smoothed rates through the property r of the Age_Adjusted_Smoother instance
>>> ar.r
array([0.10519625, 0.08494318, 0.06440072, 0.06898604, 0.06952076,
0.05020968])
"""
    def __init__(self, e, b, w, s, alpha=0.05):
        """Spatially lag each age group's events/populations with binary
        weights, then directly age-standardize the lagged totals.

        Parameters
        ----------
        e : array (n*h, 1)
            events per age group across n spatial units
        b : array (n*h, 1)
            population at risk per age group across n spatial units
        w : spatial weights instance
        s : array (n*h, 1)
            standard population per age group across n spatial units
        alpha : float
            significance level forwarded to direct_age_standardization
        """
        e = np.asarray(e).reshape(-1, 1)
        b = np.asarray(b).reshape(-1, 1)
        s = np.asarray(s).flatten()
        t = len(e)
        # h: number of age groups per spatial unit
        h = t // w.n
        # binary transform so the spatial lag is a neighborhood sum
        w.transform = 'b'
        e_n, b_n = [], []
        # lag each age group separately; stride h picks one group across units
        for i in range(h):
            e_n.append(slag(w, e[i::h]).tolist())
            b_n.append(slag(w, b[i::h]).tolist())
        # column-major reshape interleaves the groups back into the original
        # (age-within-unit) ordering
        e_n = np.array(e_n).reshape((1, t), order='F')[0]
        b_n = np.array(b_n).reshape((1, t), order='F')[0]
        e_n = e_n.reshape(s.shape)
        b_n = b_n.reshape(s.shape)
        r = direct_age_standardization(e_n, b_n, s, w.n, alpha=alpha)
        # keep only the point estimate from each (rate, lower, upper) tuple
        self.r = np.array([i[0] for i in r])
        # restore the original weights transformation
        w.transform = 'o'
@_requires('pandas')
@classmethod
def by_col(cls, df, e, b, w=None, s=None, **kwargs):
    """
    Compute smoothing by columns in a dataframe.

    Parameters
    -----------
    df : pandas.DataFrame
        a dataframe containing the data to be smoothed
    e : string or list of strings
        the name or names of columns containing event variables to be
        smoothed
    b : string or list of strings
        the name or names of columns containing the population
        variables to be smoothed
    w : pysal.weights.W or list of pysal.weights.W
        the spatial weights object or objects to use with the
        event-population pairs. If not provided and a weights object
        is in the dataframe's metadata, that weights object will be
        used.
    s : string or list of strings
        the name or names of columns to use as a standard population
        variable for the events `e` and at-risk populations `b`.
    **kwargs: optional keyword arguments
        optional keyword options that are passed directly to the
        smoother.

    Returns
    ---------
    a new dataframe containing one smoothed column per
    event/population pair.
    """
    if s is None:
        raise Exception('Standard population variable "s" must be supplied.')
    import pandas as pd
    # normalize all column arguments to lists
    if isinstance(e, str):
        e = [e]
    if isinstance(b, str):
        b = [b]
    if isinstance(s, str):
        s = [s]
    if w is None:
        found = False
        for k in df._metadata:
            # BUG FIX: look up the metadata attribute name `k`; the
            # original read df.__dict__.get(w, None) where `w` is always
            # None here, so an attached weight was never found.
            w = df.__dict__.get(k, None)
            if isinstance(w, W):
                found = True
                break
        if not found:
            raise Exception('Weights not provided and no weights attached to frame!'
                            ' Please provide a weight or attach a weight to the'
                            ' dataframe.')
    if isinstance(w, W):
        w = [w] * len(e)
    if not all(isinstance(wi, W) for wi in w):
        raise Exception('Weights object must be an instance of '
                        ' libpysal.weights.W!')
    # broadcast single population/standard columns across all events
    b = b * len(e) if len(b) == 1 and len(e) > 1 else b
    s = s * len(e) if len(s) == 1 and len(e) > 1 else s
    try:
        assert len(e) == len(b)
        assert len(e) == len(s)
        assert len(e) == len(w)
    except AssertionError:
        raise ValueError('There is no one-to-one mapping between event'
                         ' variable and population at risk variable, and '
                         ' standard population variable, and spatial '
                         ' weights!')
    rdf = []
    max_len = 0
    for ei, bi, wi, si in zip(e, b, w, s):
        outcol = '_'.join(('-'.join((ei, bi)), cls.__name__.lower()))
        this_r = cls(df[ei], df[bi], w=wi, s=df[si], **kwargs).r
        # BUG FIX: record the longest result; the original reset this to
        # 0, so shorter columns were never padded.
        max_len = max(max_len, len(this_r))
        rdf.append((outcol, this_r.tolist()))
    # BUG FIX: pad each shorter column up to max_len (the original
    # appended max_len extra Nones to every column instead).
    padded = (col + [None] * (max_len - len(col)) for _, col in rdf)
    rdf = list(zip((name for name, _ in rdf), padded))
    # NOTE(review): DataFrame.from_items was removed in pandas 1.0 --
    # presumably this file targets an older pandas; verify.
    rdf = pd.DataFrame.from_items(rdf)
    return rdf
class Disk_Smoother(_Spatial_Smoother):
    """Locally weighted averages (disk smoothing).

    Each unit's smoothed rate is the weighted average of the raw rates
    over its neighborhood, normalized by the unit's total neighbor weight.

    Parameters
    ----------
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units
    w : spatial weights matrix

    Attributes
    ----------
    r : array (n, 1)
        rate values from disk smoothing

    Examples
    --------
    Reading data in stl_hom.csv into stl to extract values
    for event and population-at-risk variables

    >>> import libpysal
    >>> stl = libpysal.io.open(libpysal.examples.get_path('stl_hom.csv'), 'r')

    The 11th and 14th columns in stl_hom.csv includes the number of
    homocides and population. Creating two arrays from these columns.

    >>> stl_e, stl_b = np.array(stl[:,10]), np.array(stl[:,13])

    Creating a spatial weights instance by reading in stl.gal file.

    >>> stl_w = libpysal.io.open(libpysal.examples.get_path('stl.gal'), 'r').read()

    Ensuring that the elements in the spatial weights instance are ordered
    by the given sequential numbers from 1 to the number of observations

    >>> if not stl_w.id_order_set: stl_w.id_order = range(1,len(stl) + 1)

    Applying disk smoothing to stl_e and stl_b

    >>> sr = Disk_Smoother(stl_e,stl_b,stl_w)

    Extracting the risk values through the property r

    >>> sr.r[:10]
    array([[4.56502262e-05],
           [3.44027685e-05],
           [3.38280487e-05],
           [4.78530468e-05],
           [3.12278573e-05],
           [2.22596997e-05],
           [2.67074856e-05],
           [2.36924573e-05],
           [3.48801587e-05],
           [3.09511832e-05]])
    """
    def __init__(self, e, b, w):
        # guard clause instead of if/else: alignment of e/b with w
        # is only defined when the weight's id_order has been fixed
        if not w.id_order_set:
            raise ValueError("w id_order must be set to align with the order of e and b")
        events = np.asarray(e).reshape(-1, 1)
        pop = np.asarray(b).reshape(-1, 1)
        rates = events * 1.0 / pop
        # total neighbor weight for each unit, in id_order
        totals = np.array([sum(w.weights[idx]) for idx in w.id_order])
        self.r = slag(w, rates) / totals.reshape(-1, 1)
class Spatial_Median_Rate(_Spatial_Smoother):
    """Spatial Median Rate Smoothing

    Parameters
    ----------
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units
    w : spatial weights instance
    aw : array (n, 1)
        auxiliary weight variable measured across n spatial units
    iteration : integer
        the number of iterations

    Attributes
    ----------
    r : array (n, 1)
        rate values from spatial median rate smoothing
    w : spatial weights instance
    aw : array (n, 1)
        auxiliary weight variable measured across n spatial units

    Examples
    --------
    Reading data in stl_hom.csv into stl to extract values
    for event and population-at-risk variables

    >>> import libpysal
    >>> stl = libpysal.io.open(libpysal.examples.get_path('stl_hom.csv'), 'r')

    The 11th and 14th columns in stl_hom.csv includes the number of
    homocides and population. Creating two arrays from these columns.

    >>> stl_e, stl_b = np.array(stl[:,10]), np.array(stl[:,13])

    Creating a spatial weights instance by reading in stl.gal file.

    >>> stl_w = libpysal.io.open(libpysal.examples.get_path('stl.gal'), 'r').read()

    Ensuring that the elements in the spatial weights instance are ordered
    by the given sequential numbers from 1 to the number of observations

    >>> if not stl_w.id_order_set: stl_w.id_order = range(1,len(stl) + 1)

    Computing spatial median rates without iteration

    >>> smr0 = Spatial_Median_Rate(stl_e,stl_b,stl_w)
    >>> smr0.r[:10]
    array([3.96047383e-05, 3.55386859e-05, 3.28308921e-05, 4.30731238e-05,
           3.12453969e-05, 1.97300409e-05, 3.10159267e-05, 2.19279204e-05,
           2.93763432e-05, 2.93763432e-05])

    Recomputing spatial median rates with 5 iterations

    >>> smr1 = Spatial_Median_Rate(stl_e,stl_b,stl_w,iteration=5)
    >>> smr1.r[:10]
    array([3.11293620e-05, 2.95956330e-05, 3.11293620e-05, 3.10159267e-05,
           2.98436066e-05, 2.76406686e-05, 3.10159267e-05, 2.94788171e-05,
           2.99460806e-05, 2.96981070e-05])

    Computing spatial median rates by using the base variable as
    auxilliary weights without iteration

    >>> smr2 = Spatial_Median_Rate(stl_e,stl_b,stl_w,aw=stl_b)
    >>> smr2.r[:10]
    array([5.77412020e-05, 4.46449551e-05, 5.77412020e-05, 5.77412020e-05,
           4.46449551e-05, 3.61363528e-05, 3.61363528e-05, 4.46449551e-05,
           5.77412020e-05, 4.03987355e-05])

    Recomputing spatial median rates by using the base variable as
    auxilliary weights with 5 iterations

    >>> smr3 = Spatial_Median_Rate(stl_e,stl_b,stl_w,aw=stl_b,iteration=5)
    >>> smr3.r[:10]
    array([3.61363528e-05, 4.46449551e-05, 3.61363528e-05, 3.61363528e-05,
           4.46449551e-05, 3.61363528e-05, 3.61363528e-05, 4.46449551e-05,
           3.61363528e-05, 4.46449551e-05])
    """
    def __init__(self, e, b, w, aw=None, iteration=1):
        if not w.id_order_set:
            raise ValueError("w id_order must be set to align with the order of e and b")
        e = np.asarray(e).flatten()
        b = np.asarray(b).flatten()
        self.r = e * 1.0 / b  # start from raw rates
        self.aw, self.w = aw, w
        # each pass replaces every rate with the (weighted) median of its
        # neighborhood; repeated passes progressively smooth the surface
        while iteration:
            self.__search_median()
            iteration -= 1
    def __search_median(self):
        # One smoothing pass. All new values are computed from the rates
        # of the PREVIOUS pass (r is only replaced at the end).
        r, aw, w = self.r, self.aw, self.w
        new_r = []
        if self.aw is None:
            # unweighted: plain median over self + neighbors
            for i, id in enumerate(w.id_order):
                r_disk = np.append(r[i], r[w.neighbor_offsets[id]])
                new_r.append(np.median(r_disk))
        else:
            # weighted: aw supplies the weights for the weighted median
            for i, id in enumerate(w.id_order):
                id_d = [i] + list(w.neighbor_offsets[id])
                aw_d, r_d = aw[id_d], r[id_d]
                new_r.append(weighted_median(r_d, aw_d))
        self.r = np.asarray(new_r).reshape(r.shape)
class Spatial_Filtering(_Smoother):
    """Spatial Filtering

    Rates are estimated on a regular grid of points laid over the bounding
    box; each grid point's rate is computed either from all observations
    within a fixed radius `r`, or from the nearest observations until the
    cumulative population reaches `pop` (adaptive windows).

    Parameters
    ----------
    bbox : a list of two lists where each list is a pair of coordinates
        a bounding box for the entire n spatial units
    data : array (n, 2)
        x, y coordinates
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units
    x_grid : integer
        the number of cells on x axis
    y_grid : integer
        the number of cells on y axis
    r : float
        fixed radius of a moving window
    pop : integer
        population threshold to create adaptive moving windows

    Attributes
    ----------
    grid : array (x_grid*y_grid, 2)
        x, y coordinates for grid points
    r : array (x_grid*y_grid, 1)
        rate values for grid points

    Notes
    -----
    No tool is provided to find an optimal value for r or pop.

    Examples
    --------
    Reading data in stl_hom.csv into stl to extract values
    for event and population-at-risk variables

    >>> import libpysal
    >>> stl = libpysal.io.open(libpysal.examples.get_path('stl_hom.csv'), 'r')

    Reading the stl data in the WKT format so that
    we can easily extract polygon centroids

    >>> from libpysal.io.util.wkt import WKTParser
    >>> fromWKT = WKTParser()
    >>> stl.cast('WKT',fromWKT)

    Extracting polygon centroids through iteration

    >>> d = np.array([i.centroid for i in stl[:,0]])

    Specifying the bounding box for the stl_hom data.
    The bbox should includes two points for the left-bottom and the
    right-top corners

    >>> bbox = [[-92.700676, 36.881809], [-87.916573, 40.3295669]]

    The 11th and 14th columns in stl_hom.csv includes the number of
    homocides and population. Creating two arrays from these columns.

    >>> stl_e, stl_b = np.array(stl[:,10]), np.array(stl[:,13])

    Applying spatial filtering by using a 10*10 mesh grid and a moving
    window with 2 radius

    >>> sf_0 = Spatial_Filtering(bbox,d,stl_e,stl_b,10,10,r=2)
    >>> sf_0.r[:10]
    array([4.23561763e-05, 4.45290850e-05, 4.56456221e-05, 4.49133384e-05,
           4.39671835e-05, 4.44903042e-05, 4.19845497e-05, 4.11936548e-05,
           3.93463504e-05, 4.04376345e-05])

    Applying another spatial filtering by allowing the moving window to
    grow until 600000 people are found in the window

    >>> sf = Spatial_Filtering(bbox,d,stl_e,stl_b,10,10,pop=600000)
    >>> sf.r.shape
    (100,)
    >>> sf.r[:10]
    array([3.73728738e-05, 4.04456300e-05, 4.04456300e-05, 3.81035327e-05,
           4.54831940e-05, 4.54831940e-05, 3.75658628e-05, 3.75658628e-05,
           3.75658628e-05, 3.75658628e-05])
    """
    def __init__(self, bbox, data, e, b, x_grid, y_grid, r=None, pop=None):
        e = np.asarray(e).reshape(-1, 1)
        b = np.asarray(b).reshape(-1, 1)
        data_tree = KDTree(data)
        x_range = bbox[1][0] - bbox[0][0]
        y_range = bbox[1][1] - bbox[0][1]
        # regular mesh of grid points across the bounding box
        x, y = np.mgrid[bbox[0][0]:bbox[1][0]:float(x_range) / x_grid,
                        bbox[0][1]:bbox[1][1]:float(y_range) / y_grid]
        self.grid = list(zip(x.ravel(), y.ravel()))
        self.r = []
        if r is None and pop is None:
            raise ValueError("Either r or pop should not be None")
        if r is not None:
            # fixed-radius windows: rate = events/population inside each disk
            pnts_in_disk = data_tree.query_ball_point(self.grid, r=r)
            for i in pnts_in_disk:
                # NOTE(review): `r` is rebound here to the per-cell rate,
                # shadowing the radius argument -- safe only because
                # query_ball_point has already run; verify before editing.
                r = e[i].sum() * 1.0 / b[i].sum()
                self.r.append(r)
        if pop is not None:
            # adaptive windows: take nearest points until cumulative
            # population exceeds `pop`
            half_nearest_pnts = data_tree.query(self.grid, k=len(e))[1]
            for i in half_nearest_pnts:
                e_n, b_n = e[i].cumsum(), b[i].cumsum()
                b_n_filter = b_n <= pop
                e_n_f, b_n_f = e_n[b_n_filter], b_n[b_n_filter]
                if len(e_n_f) == 0:
                    # nearest single point already exceeds pop: use it alone
                    e_n_f = e_n[[0]]
                    b_n_f = b_n[[0]]
                self.r.append(e_n_f[-1] * 1.0 / b_n_f[-1])
        self.r = np.array(self.r)
    @_requires('pandas')
    @classmethod
    def by_col(cls, df, e, b, x_grid, y_grid, geom_col='geometry', **kwargs):
        """
        Compute smoothing by columns in a dataframe. The bounding box and point
        information is computed from the geometry column.

        Parameters
        -----------
        df : pandas.DataFrame
            a dataframe containing the data to be smoothed
        e : string or list of strings
            the name or names of columns containing event variables to be
            smoothed
        b : string or list of strings
            the name or names of columns containing the population
            variables to be smoothed
        x_grid : integer
            number of grid cells to use along the x-axis
        y_grid : integer
            number of grid cells to use along the y-axis
        geom_col: string
            the name of the column in the dataframe containing the
            geometry information.
        **kwargs: optional keyword arguments
            optional keyword options that are passed directly to the
            smoother.

        Returns
        ---------
        a new dataframe of dimension (x_grid*y_grid, 3), containing the
        coordinates of the grid cells and the rates associated with those grid
        cells.
        """
        import pandas as pd
        # prep for application over multiple event/population pairs
        if isinstance(e, str):
            e = [e]
        if isinstance(b, str):
            b = [b]
        if len(e) > len(b):
            b = b * len(e)
        # allow a single grid resolution to be shared by all pairs
        if isinstance(x_grid, (int, float)):
            x_grid = [x_grid] * len(e)
        if isinstance(y_grid, (int, float)):
            y_grid = [y_grid] * len(e)
        bbox = get_bounding_box(df[geom_col])
        bbox = [[bbox.left, bbox.lower], [bbox.right, bbox.upper]]
        data = get_points_array(df[geom_col])
        res = []
        for ename, bname, xgi, ygi in zip(e, b, x_grid, y_grid):
            r = cls(bbox, data, df[ename], df[bname], xgi, ygi, **kwargs)
            grid = np.asarray(r.grid).reshape(-1, 2)
            name = '_'.join(('-'.join((ename, bname)), cls.__name__.lower()))
            colnames = ('_'.join((name, suffix)) for suffix in ['X', 'Y', 'R'])
            items = [(name, col) for name, col in zip(colnames, [grid[:, 0],
                                                                 grid[:, 1],
                                                                 r.r])]
            # NOTE(review): DataFrame.from_items was removed in pandas 1.0;
            # presumably this targets an older pandas -- verify.
            res.append(pd.DataFrame.from_items(items))
        outdf = pd.concat(res)
        return outdf
class Headbanging_Triples(object):
    """Generate a pseudo spatial weights instance that contains
    headbanging triples.

    Parameters
    ----------
    data : array (n, 2)
        numpy array of x, y coordinates
    w : spatial weights instance
    k : integer number of nearest neighbors
    t : integer
        the number of triples
    angle : integer between 0 and 180
        the angle criterium for a set of triples
    edgecorr : boolean
        whether or not correction for edge points is made

    Attributes
    ----------
    triples : dictionary
        key is observation record id, value is a list of lists of triple ids
    extra : dictionary
        key is observation record id, value is a list of the following:
        tuple of original triple observations,
        distance between original triple observations,
        distance between an original triple observation and its
        extrapolated point

    Examples
    --------
    >>> import libpysal # doctest: +SKIP
    >>> stl_db = libpysal.io.open(libpysal.examples.get_path('stl_hom.csv'),'r') # doctest: +SKIP
    >>> from libpysal.io.util.wkt import WKTParser # doctest: +SKIP
    >>> fromWKT = WKTParser() # doctest: +SKIP
    >>> stl_db.cast('WKT',fromWKT) # doctest: +SKIP
    >>> d = np.array([i.centroid for i in stl_db[:,0]]) # doctest: +SKIP
    >>> w = libpysal.weights.KNN(d,k=5) # doctest: +SKIP
    >>> if not w.id_order_set: w.id_order = w.id_order # doctest: +SKIP
    >>> ht = Headbanging_Triples(d,w,k=5) # doctest: +SKIP
    >>> for k, item in ht.triples.items()[:5]: print(k, item) # doctest: +SKIP
    0 [(5, 6), (10, 6)]
    1 [(4, 7), (4, 14), (9, 7)]
    2 [(0, 8), (10, 3), (0, 6)]
    3 [(4, 2), (2, 12), (8, 4)]
    4 [(8, 1), (12, 1), (8, 9)]
    >>> import libpysal
    >>> sids = libpysal.io.open(libpysal.examples.get_path('sids2.shp'),'r') # doctest: +SKIP
    >>> sids_d = np.array([i.centroid for i in sids]) # doctest: +SKIP
    >>> sids_w = libpysal.weights.KNN(sids_d,k=5) # doctest: +SKIP
    >>> if not sids_w.id_order_set: sids_w.id_order = sids_w.id_order # doctest: +SKIP
    >>> s_ht = Headbanging_Triples(sids_d,sids_w,k=5) # doctest: +SKIP
    >>> for k, item in s_ht.triples.items()[:5]: print(k, item) # doctest: +SKIP
    0 [(1, 18), (1, 21), (1, 33)]
    1 [(2, 40), (2, 22), (22, 40)]
    2 [(39, 22), (1, 9), (39, 17)]
    3 [(16, 6), (19, 6), (20, 6)]
    4 [(5, 15), (27, 15), (35, 15)]
    >>> s_ht2 = Headbanging_Triples(sids_d,sids_w,k=5,edgecor=True) # doctest: +SKIP
    >>> for k, item in s_ht2.triples.items()[:5]: print(k, item) # doctest: +SKIP
    0 [(1, 18), (1, 21), (1, 33)]
    1 [(2, 40), (2, 22), (22, 40)]
    2 [(39, 22), (1, 9), (39, 17)]
    3 [(16, 6), (19, 6), (20, 6)]
    4 [(5, 15), (27, 15), (35, 15)]
    >>> extrapolated = s_ht2.extra[72] # doctest: +SKIP
    >>> extrapolated[0] # doctest: +SKIP
    (89, 77)
    >>> round(extrapolated[1],5), round(extrapolated[2],6) # doctest: +SKIP
    (0.33753, 0.302707)
    """
    def __init__(self, data, w, k=5, t=3, angle=135.0, edgecor=False):
        # NOTE(review): this raise makes the constructor always fail, so
        # everything below is unreachable dead code kept for reference.
        # Presumably a (heavy-handed) deprecation marker -- confirm whether
        # warnings.warn was intended instead.
        raise DeprecationWarning('Deprecated')
        if k < 3:
            raise ValueError("w should be NeareastNeighbors instance & the number of neighbors should be more than 3.")
        if not w.id_order_set:
            raise ValueError("w id_order must be set to align with the order of data")
        self.triples, points = {}, {}
        # map (id, Point) -> {neighbor_id: neighbor Point}
        for i, pnt in enumerate(data):
            ng = w.neighbor_offsets[i]
            points[(i, Point(pnt))] = dict(list(zip(ng, [Point(d)
                                                         for d in data[ng]])))
        for i, pnt in list(points.keys()):
            ng = points[(i, pnt)]
            tr, tr_dis = {}, []
            # candidate triples: neighbor pairs subtending an angle at pnt
            # wider than the angle criterion
            for c in comb(list(ng.keys()), 2):
                p2, p3 = ng[c[0]], ng[c[-1]]
                ang = get_angle_between(Ray(pnt, p2), Ray(pnt, p3))
                if ang > angle or (ang < 0.0 and ang + 360 > angle):
                    tr[tuple(c)] = (p2, p3)
            if len(tr) > t:
                # too many candidates: keep the t triples whose connecting
                # segment passes closest to pnt
                for c in list(tr.keys()):
                    p2, p3 = tr[c]
                    tr_dis.append((get_segment_point_dist(
                        LineSegment(p2, p3), pnt), c))
                tr_dis = sorted(tr_dis)[:t]
                self.triples[i] = [trp for dis, trp in tr_dis]
            else:
                self.triples[i] = list(tr.keys())
        if edgecor:
            # edge correction: hull points with no triples get one
            # extrapolated triple
            self.extra = {}
            ps = dict([(p, i) for i, p in list(points.keys())])
            chull = convex_hull(list(ps.keys()))
            chull = [p for p in chull if len(self.triples[ps[p]]) == 0]
            for point in chull:
                key = (ps[point], point)
                ng = points[key]
                ng_dist = [(get_points_dist(point, p) , p) for p in list(ng.values())]
                ng_dist_s = sorted(ng_dist, reverse=True)
                extra = None
                # try neighbors from nearest to farthest until a valid
                # extrapolated triple is found
                while extra is None and len(ng_dist_s) > 0:
                    p2 = ng_dist_s.pop()[-1]
                    p3s = list(ng.values())
                    p3s.remove(p2)
                    for p3 in p3s:
                        dist_p2_p3 = get_points_dist(p2, p3)
                        dist_p_p2 = get_points_dist(point, p2)
                        dist_p_p3 = get_points_dist(point, p3)
                        # anchor rays at the neighbor closest to point
                        if dist_p_p2 <= dist_p_p3:
                            ray1, ray2, s_pnt, dist, c = Ray(p2, point), Ray(p2, p3), p2, dist_p_p2, (ps[p2], ps[p3])
                        else:
                            ray1, ray2, s_pnt, dist, c = Ray(p3, point), Ray(p3, p2), p3, dist_p_p3, (ps[p3], ps[p2])
                        ang = get_angle_between(ray1, ray2)
                        if ang >= 90 + angle / 2 or (ang < 0 and ang + 360 >= 90 + angle / 2):
                            ex_point = get_point_at_angle_and_dist(
                                ray1, angle, dist)
                            extra = [c, dist_p2_p3, get_points_dist(
                                s_pnt, ex_point)]
                            break
                # NOTE(review): if no candidate satisfies the angle test,
                # `extra` stays None and the next line raises TypeError.
                self.triples[ps[point]].append(extra[0])
                self.extra[ps[point]] = extra
class Headbanging_Median_Rate(object):
    """Headbanging Median Rate Smoothing

    Parameters
    ----------
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units
    t : Headbanging_Triples instance
    aw : array (n, 1)
        auxilliary weight variable measured across n spatial units
    iteration : integer
        the number of iterations

    Attributes
    ----------
    r : array (n, 1)
        rate values from headbanging median smoothing

    Examples
    --------
    >>> import libpysal # doctest: +SKIP
    >>> sids = libpysal.io.open(libpysal.examples.get_path('sids2.shp'), 'r') # doctest: +SKIP
    >>> sids_d = np.array([i.centroid for i in sids]) # doctest: +SKIP
    >>> sids_w = libpysal.weights.KNN(sids_d,k=5) # doctest: +SKIP
    >>> if not sids_w.id_order_set: sids_w.id_order = sids_w.id_order # doctest: +SKIP
    >>> s_ht = Headbanging_Triples(sids_d,sids_w,k=5) # doctest: +SKIP
    DeprecationWarning: Deprecated
    >>> sids_db = libpysal.io.open(libpysal.examples.get_path('sids2.dbf'), 'r') # doctest: +SKIP
    >>> s_e, s_b = np.array(sids_db[:,9]), np.array(sids_db[:,8]) # doctest: +SKIP
    >>> sids_hb_r = Headbanging_Median_Rate(s_e,s_b,s_ht) # doctest: +SKIP
    >>> sids_hb_r.r[:5] # doctest: +SKIP
    array([ 0.00075586,  0.        ,  0.0008285 ,  0.0018315 ,  0.00498891])
    >>> sids_hb_r2 = Headbanging_Median_Rate(s_e,s_b,s_ht,iteration=5) # doctest: +SKIP
    >>> sids_hb_r2.r[:5] # doctest: +SKIP
    array([ 0.0008285 ,  0.00084331,  0.00086896,  0.0018315 ,  0.00498891])
    >>> sids_hb_r3 = Headbanging_Median_Rate(s_e,s_b,s_ht,aw=s_b) # doctest: +SKIP
    >>> sids_hb_r3.r[:5] # doctest: +SKIP
    array([ 0.00091659,  0.        ,  0.00156838,  0.0018315 ,  0.00498891])
    """
    def __init__(self, e, b, t, aw=None, iteration=1):
        # NOTE(review): this raise makes the constructor always fail, so
        # everything below is unreachable dead code kept for reference.
        raise DeprecationWarning('Deprecated')
        self.r = e * 1.0 / b
        self.tr, self.aw = t.triples, aw
        if hasattr(t, 'extra'):
            self.extra = t.extra
        while iteration:
            self.__search_headbanging_median()
            iteration -= 1
    def __get_screens(self, id, triples, weighted=False):
        # Build the screening values (low/high medians of the triples)
        # used to clamp observation `id`'s rate.
        r, tr = self.r, self.tr
        if len(triples) == 0:
            return r[id]
        if hasattr(self, 'extra') and id in self.extra:
            # edge-corrected observation: its single triple contains an
            # extrapolated member whose rate must be reconstructed
            extra = self.extra
            trp_r = r[list(triples[0])]
            # observed rate
            # plus difference in rate scaled by ratio of extrapolated distance
            # & observed distance.
            trp_r[-1] = trp_r[0] + (trp_r[0] - trp_r[-1]) * (
                extra[id][-1] * 1.0 / extra[id][1])
            trp_r = sorted(trp_r)
            if not weighted:
                return r[id], trp_r[0], trp_r[-1]
            else:
                trp_aw = self.aw[triples[0]]
                extra_w = trp_aw[0] + (trp_aw[0] - trp_aw[-1]) * (
                    extra[id][-1] * 1.0 / extra[id][1])
                return r[id], trp_r[0], trp_r[-1], self.aw[id], trp_aw[0] + extra_w
        if not weighted:
            # unweighted: medians of per-triple min and max rates
            lowest, highest = [], []
            for trp in triples:
                trp_r = np.sort(r[list(trp)])
                lowest.append(trp_r[0])
                highest.append(trp_r[-1])
            return r[id], np.median(np.array(lowest)), np.median(np.array(highest))
        if weighted:
            # weighted: weighted medians, carrying each extreme's aw weight
            lowest, highest = [], []
            lowest_aw, highest_aw = [], []
            for trp in triples:
                trp_r = r[list(trp)]
                dtype = [('r', '%s' % trp_r.dtype), ('w',
                                                     '%s' % self.aw.dtype)]
                trp_r = np.array(list(zip(trp_r, list(trp))), dtype=dtype)
                trp_r.sort(order='r')
                lowest.append(trp_r['r'][0])
                highest.append(trp_r['r'][-1])
                lowest_aw.append(self.aw[int(round(trp_r['w'][0]))])
                highest_aw.append(self.aw[int(round(trp_r['w'][-1]))])
            wm_lowest = weighted_median(np.array(lowest), np.array(lowest_aw))
            wm_highest = weighted_median(
                np.array(highest), np.array(highest_aw))
            triple_members = flatten(triples, unique=False)
            return r[id], wm_lowest, wm_highest, self.aw[id] * len(triples), self.aw[triple_members].sum()
    def __get_median_from_screens(self, screens):
        # Clamp the observed rate between the low/high screens.
        # NOTE(review): the float check presumably also needs to accept
        # numpy scalars returned when triples is empty -- verify.
        if isinstance(screens, float):
            return screens
        elif len(screens) == 3:
            return np.median(np.array(screens))
        elif len(screens) == 5:
            rk, wm_lowest, wm_highest, w1, w2 = screens
            if rk >= wm_lowest and rk <= wm_highest:
                return rk
            elif rk < wm_lowest and w1 < w2:
                return wm_lowest
            elif rk > wm_highest and w1 < w2:
                return wm_highest
            else:
                return rk
    def __search_headbanging_median(self):
        # one smoothing pass over all observations
        r, tr = self.r, self.tr
        new_r = []
        for k in list(tr.keys()):
            screens = self.__get_screens(
                k, tr[k], weighted=(self.aw is not None))
            new_r.append(self.__get_median_from_screens(screens))
        self.r = np.array(new_r)
    @_requires('pandas')
    @classmethod
    def by_col(cls, df, e, b, t=None, geom_col='geometry', inplace=False, **kwargs):
        """
        Compute smoothing by columns in a dataframe. The bounding box and point
        information is computed from the geometry column.

        Parameters
        -----------
        df : pandas.DataFrame
            a dataframe containing the data to be smoothed
        e : string or list of strings
            the name or names of columns containing event variables to be
            smoothed
        b : string or list of strings
            the name or names of columns containing the population
            variables to be smoothed
        t : Headbanging_Triples instance or list of Headbanging_Triples
            list of headbanging triples instances. If not provided, this
            is computed from the geometry column of the dataframe.
        geom_col: string
            the name of the column in the dataframe containing the
            geometry information.
        inplace : bool
            a flag denoting whether to output a copy of `df` with the
            relevant smoothed columns appended, or to append the columns
            directly to `df` itself.
        **kwargs: optional keyword arguments
            optional keyword options that are passed directly to the
            smoother.

        Returns
        ---------
        a new dataframe containing the smoothed Headbanging Median Rates for the
        event/population pairs. If done inplace, there is no return value and
        `df` is modified in place.
        """
        import pandas as pd
        if not inplace:
            # work on a copy, mutate it in place, and return it
            new = df.copy()
            cls.by_col(new, e, b, t=t, geom_col=geom_col, inplace=True, **kwargs)
            return new
        import pandas as pd
        # prep for application over multiple event/population pairs
        if isinstance(e, str):
            e = [e]
        if isinstance(b, str):
            b = [b]
        if len(e) > len(b):
            b = b * len(e)
        data = get_points_array(df[geom_col])
        #Headbanging_Triples doesn't take **kwargs, so filter its arguments
        # (self, data, w, k=5, t=3, angle=135.0, edgecor=False):
        w = kwargs.pop('w', None)
        if w is None:
            found = False
            for k in df._metadata:
                # NOTE(review): looks like this should read
                # df.__dict__.get(k, None); `w` is always None here, so an
                # attached weight can never be found -- verify.
                w = df.__dict__.get(w, None)
                if isinstance(w, W):
                    found = True
            if not found:
                raise Exception('Weights not provided and no weights attached to frame!'
                                ' Please provide a weight or attach a weight to the'
                                ' dataframe')
        k = kwargs.pop('k', 5)
        t = kwargs.pop('t', 3)
        angle = kwargs.pop('angle', 135.0)
        edgecor = kwargs.pop('edgecor', False)
        hbt = Headbanging_Triples(data, w, k=k, t=t, angle=angle,
                                  edgecor=edgecor)
        res = []
        for ename, bname in zip(e, b):
            r = cls(df[ename], df[bname], hbt, **kwargs).r
            name = '_'.join(('-'.join((ename, bname)), cls.__name__.lower()))
            df[name] = r
|
def choynowski(e, b, n, threshold=None):
    """Choynowski map probabilities.

    For each of n spatial units, computes the Poisson tail probability of
    the observed event count given the count expected under a constant
    overall rate.

    Parameters
    ----------
    e : array (n*h, 1)
        event variable measured for each age group across n spatial units
    b : array (n*h, 1)
        population at risk variable measured for each age group across n
        spatial units
    n : integer
        the number of spatial units
    threshold : float
        p-values greater than or equal to this value are reported as 0.0

    Notes
    -----
    e and b are arranged in the same order.

    Returns
    -------
    : array (n, 1)
        Poisson tail probabilities
    """
    # collapse age-group rows down to one total per spatial unit
    e_by_n = sum_by_n(e, 1.0, n)
    b_by_n = sum_by_n(b, 1.0, n)
    r_by_n = sum(e_by_n) * 1.0 / sum(b_by_n)  # overall crude rate
    expected = r_by_n * b_by_n
    p = []
    for index, i in enumerate(e_by_n):
        if i <= expected[index]:
            # lower-tail probability for under-expected counts
            p.append(poisson.cdf(i, expected[index]))
        else:
            # upper-tail probability for over-expected counts
            p.append(1 - poisson.cdf(i - 1, expected[index]))
    if threshold:
        p = [i if i < threshold else 0.0 for i in p]
    return np.array(p)
Parameters
----------
e : array(n*h, 1)
event variable measured for each age group across n spatial units
b : array(n*h, 1)
population at risk variable measured for each age group across n spatial units
n : integer
the number of spatial units
threshold : float
Returns zero for any p-value greater than threshold
Notes
-----
e and b are arranged in the same order
Returns
-------
: array (nx1)
Examples
--------
Creating an array of an event variable (e.g., the number of cancer patients)
for 2 regions in each of which 4 age groups are available.
The first 4 values are event values for 4 age groups in the region 1,
and the next 4 values are for 4 age groups in the region 2.
>>> e = np.array([30, 25, 25, 15, 33, 21, 30, 20])
Creating another array of a population-at-risk variable (e.g., total population)
for the same two regions.
The order for entering values is the same as the case of e.
>>> b = np.array([100, 100, 110, 90, 100, 90, 110, 90])
Specifying the number of regions.
>>> n = 2
Applying indirect_age_standardization function to e and b
>>> a,b = choynowski(e, b, n)
>>> round(a, 3)
0.304
>>> round(b, 3)
0.294 | train | https://github.com/pysal/esda/blob/2fafc6ec505e153152a86601d3e0fba080610c20/esda/smoothing.py#L450-L513 | [
"def sum_by_n(d, w, n):\n \"\"\"A utility function to summarize a data array into n values\n after weighting the array with another weight array w\n\n Parameters\n ----------\n d : array\n (t, 1), numerical values\n w : array\n (t, 1), numerical... | from __future__ import division
"""
Apply smoothing to rate computation
[Longer Description]
Author(s):
Myunghwa Hwang mhwang4@gmail.com
David Folch dfolch@asu.edu
Luc Anselin luc.anselin@asu.edu
Serge Rey srey@asu.edu
"""
__author__ = "Myunghwa Hwang <mhwang4@gmail.com>, David Folch <dfolch@asu.edu>, Luc Anselin <luc.anselin@asu.edu>, Serge Rey <srey@asu.edu"
from libpysal.weights.weights import W
from libpysal.weights.distance import Kernel
from libpysal.weights.util import get_points_array, comb
from libpysal.cg import Point, Ray, LineSegment
from libpysal.cg import get_angle_between, get_points_dist, get_segment_point_dist,\
get_point_at_angle_and_dist, convex_hull, get_bounding_box
from libpysal.common import np, KDTree, requires as _requires
from libpysal.weights.spatial_lag import lag_spatial as slag
from scipy.stats import gamma, norm, chi2, poisson
from functools import reduce
import doctest
__all__ = ['Excess_Risk', 'Empirical_Bayes', 'Spatial_Empirical_Bayes', 'Spatial_Rate', 'Kernel_Smoother', 'Age_Adjusted_Smoother', 'Disk_Smoother', 'Spatial_Median_Rate', 'Spatial_Filtering', 'Headbanging_Triples', 'Headbanging_Median_Rate', 'flatten', 'weighted_median', 'sum_by_n', 'crude_age_standardization', 'direct_age_standardization', 'indirect_age_standardization', 'standardized_mortality_ratio', 'choynowski', 'assuncao_rate']
def flatten(l, unique=True):
    """Flatten a list of lists (or tuples) into a single list.

    Parameters
    ----------
    l : list
        of lists (or other concatenable sequences, e.g. tuples)
    unique : boolean
        whether or not only unique items are wanted (default=True).
        Note that unique=True does not preserve element order.

    Returns
    -------
    list
        of single items

    Examples
    --------
    Creating a sample list whose elements are lists of integers

    >>> l = [[1, 2], [3, 4, ], [5, 6]]

    Applying flatten function

    >>> flatten(l)
    [1, 2, 3, 4, 5, 6]
    """
    # BUG FIX: reduce() with no initial value raises TypeError on an
    # empty sequence; guard explicitly (rather than passing an initial
    # [] which would break inputs containing tuples).
    if not l:
        return []
    l = reduce(lambda x, y: x + y, l)
    if not unique:
        return list(l)
    return list(set(l))
def weighted_median(d, w):
    """Return the median of d weighted by w.

    Parameters
    ----------
    d : array
        (n, 1), variable for which median will be found
    w : array
        (n, 1), variable on which d's median will be decided

    Notes
    -----
    d and w are arranged in the same order

    Returns
    -------
    float
        median of d

    Examples
    --------
    Creating an array including five integers.
    We will get the median of these integers.

    >>> d = np.array([5,4,3,1,2])

    Creating another array including weight values for the above integers.
    The median of d will be decided with a consideration to these weight
    values.

    >>> w = np.array([10, 22, 9, 2, 5])

    Applying weighted_median function

    >>> weighted_median(d, w)
    4
    """
    # order observations by value; stable sort keeps tied values in their
    # original relative order
    order = np.argsort(d, kind='stable')
    ordered_d = d[order]
    cum_w = w[order].cumsum()
    half = cum_w[-1] * 1.0 / 2
    # first position where the cumulative weight reaches half the total
    idx = np.nonzero(cum_w >= half)[0][0]
    if cum_w[idx] == half and len(d) - 1 > idx:
        # exact split: average the two central values
        return ordered_d[idx:idx + 2].mean()
    return ordered_d[idx]
def sum_by_n(d, w, n):
    """Summarize a weighted data array into ``n`` consecutive group sums.

    Parameters
    ----------
    d : array
        (t, 1), numerical values
    w : array
        (t, 1), numerical values for weighting
    n : integer
        the number of groups; t = c*n (c is a constant)

    Returns
    -------
    : array
        (n, 1), an array with summarized values

    Examples
    --------
    >>> d = np.array([10, 9, 20, 30])
    >>> w = np.array([0.5, 0.1, 0.3, 0.8])
    >>> sum_by_n(d, w, 2)
    array([ 5.9, 30. ])
    """
    t = len(d)
    h = t // n  # group length; t is assumed to be an exact multiple of n
    weighted = d * w
    # Sum each consecutive run of h weighted values.
    return np.array([sum(weighted[start:start + h]) for start in range(0, t, h)])
def crude_age_standardization(e, b, n):
    """Compute rates through crude age standardization.

    Parameters
    ----------
    e : array
        (n*h, 1), event variable measured for each age group across n spatial units
    b : array
        (n*h, 1), population at risk variable measured for each age group across n spatial units
    n : integer
        the number of spatial units

    Notes
    -----
    e and b are arranged in the same order

    Returns
    -------
    : array
        (n, 1), age standardized rate

    Examples
    --------
    >>> e = np.array([30, 25, 25, 15, 33, 21, 30, 20])
    >>> b = np.array([100, 100, 110, 90, 100, 90, 110, 90])
    >>> crude_age_standardization(e, b, 2)
    array([0.2375    , 0.26666667])
    """
    # Age-specific rates for every (unit, age-group) cell.
    rates = e * 1.0 / b
    # Total population of each spatial unit, repeated per age group so each
    # cell carries its within-unit population share as the weight.
    unit_totals = sum_by_n(b, 1.0, n)
    shares = b * 1.0 / unit_totals.repeat(len(e) // n)
    return sum_by_n(rates, shares, n)
def direct_age_standardization(e, b, s, n, alpha=0.05):
    """Compute rates through direct age standardization.

    Parameters
    ----------
    e : array
        (n*h, 1), event variable measured for each age group across n spatial units
    b : array
        (n*h, 1), population at risk variable measured for each age group across n spatial units
    s : array
        (n*h, 1), standard population for each age group across n spatial units
    n : integer
        the number of spatial units
    alpha : float
        significance level for confidence interval

    Notes
    -----
    e, b, and s are arranged in the same order

    Returns
    -------
    list
        a list of n tuples; a tuple has a rate and its lower and upper limits

    Examples
    --------
    >>> e = np.array([30, 25, 25, 15, 33, 21, 30, 20])
    >>> b = np.array([1000, 1000, 1100, 900, 1000, 900, 1100, 900])
    >>> s = np.array([1000, 900, 1000, 900, 1000, 900, 1000, 900])
    >>> a, b = [i[0] for i in direct_age_standardization(e, b, s, 2)]
    >>> round(a, 4)
    0.0237
    >>> round(b, 4)
    0.0267
    """
    # Each cell's weight: its standard-population share divided by its own
    # population at risk.
    age_weight = (1.0 / b) * (s * 1.0 / sum_by_n(s, 1.0, n).repeat(len(s) // n))
    adjusted_r = sum_by_n(e, age_weight, n)
    var_estimate = sum_by_n(e, np.square(age_weight), n)
    # Gamma-interval parameters (Fay-Feuer style).
    g_a = np.square(adjusted_r) / var_estimate
    g_b = var_estimate / adjusted_r
    # Largest weight in each unit, used to widen the upper-limit parameters.
    k = [age_weight[i:i + len(b) // n].max() for i in range(0, len(b),
                                                            len(b) // n)]
    g_a_k = np.square(adjusted_r + k) / (var_estimate + np.square(k))
    g_b_k = (var_estimate + np.square(k)) / (adjusted_r + k)
    res = []
    for i in range(len(adjusted_r)):
        if adjusted_r[i] == 0:
            # Zero observed events: exact Poisson upper bound,
            # 0.5 * chi2 quantile with 2 degrees of freedom.
            # BUG FIX: the original call omitted the required ``df`` argument
            # of chi2.ppf, which raised TypeError whenever a rate was zero.
            upper = 0.5 * chi2.ppf(1 - 0.5 * alpha, 2)
            lower = 0.0
        else:
            lower = gamma.ppf(0.5 * alpha, g_a[i], scale=g_b[i])
            upper = gamma.ppf(1 - 0.5 * alpha, g_a_k[i], scale=g_b_k[i])
        res.append((adjusted_r[i], lower, upper))
    return res
def indirect_age_standardization(e, b, s_e, s_b, n, alpha=0.05):
    """Compute rates through indirect age standardization.

    Parameters
    ----------
    e : array
        (n*h, 1), event variable measured for each age group across n spatial units
    b : array
        (n*h, 1), population at risk variable measured for each age group across n spatial units
    s_e : array
        (n*h, 1), event variable measured for each age group across n spatial units in a standard population
    s_b : array
        (n*h, 1), population variable measured for each age group across n spatial units in a standard population
    n : integer
        the number of spatial units
    alpha : float
        significance level for confidence interval

    Notes
    -----
    e, b, s_e, and s_b are arranged in the same order

    Returns
    -------
    list
        a list of n tuples; a tuple has a rate and its lower and upper limits

    Examples
    --------
    >>> e = np.array([30, 25, 25, 15, 33, 21, 30, 20])
    >>> b = np.array([100, 100, 110, 90, 100, 90, 110, 90])
    >>> s_e = np.array([100, 45, 120, 100, 50, 30, 200, 80])
    >>> s_b = np.array([1000, 900, 1000, 900, 1000, 900, 1000, 900])
    >>> [i[0] for i in indirect_age_standardization(e, b, s_e, s_b, 2)]
    [0.23723821989528798, 0.2610803324099723]
    """
    smr = standardized_mortality_ratio(e, b, s_e, s_b, n)
    # Overall rate in the standard population; scaling it by the SMR yields
    # the indirectly standardized rate.
    overall = sum(s_e * 1.0) / sum(s_b * 1.0)
    adjusted_r = overall * smr
    # CI on the log scale: sd of log(SMR) is 1/sqrt(observed events).
    observed = sum_by_n(e, 1.0, n)
    log_smr = np.log(smr)
    log_sd = 1.0 / np.sqrt(observed)
    z = norm.ppf(1 - 0.5 * alpha)
    lower = np.exp(log_smr - z * log_sd) * overall
    upper = np.exp(log_smr + z * log_sd) * overall
    return list(zip(adjusted_r, lower, upper))
def standardized_mortality_ratio(e, b, s_e, s_b, n):
    """Compute the standardized mortality ratio (SMR).

    Parameters
    ----------
    e : array
        (n*h, 1), event variable measured for each age group across n spatial units
    b : array
        (n*h, 1), population at risk variable measured for each age group across n spatial units
    s_e : array
        (n*h, 1), event variable measured for each age group across n spatial units in a standard population
    s_b : array
        (n*h, 1), population variable measured for each age group across n spatial units in a standard population
    n : integer
        the number of spatial units

    Notes
    -----
    e, b, s_e, and s_b are arranged in the same order

    Returns
    -------
    array
        (nx1)

    Examples
    --------
    >>> e = np.array([30, 25, 25, 15, 33, 21, 30, 20])
    >>> b = np.array([100, 100, 110, 90, 100, 90, 110, 90])
    >>> s_e = np.array([100, 45, 120, 100, 50, 30, 200, 80])
    >>> s_b = np.array([1000, 900, 1000, 900, 1000, 900, 1000, 900])
    >>> a, b = standardized_mortality_ratio(e, b, s_e, s_b, 2)
    >>> round(a, 4)
    2.4869
    """
    # Age-specific rates in the standard population.
    std_rates = s_e * 1.0 / s_b
    # Observed events per unit versus the count expected if the unit followed
    # the standard rates.
    observed = sum_by_n(e, 1.0, n)
    expected = sum_by_n(b, std_rates, n)
    return observed * 1.0 / expected
def assuncao_rate(e, b):
    """Standardize rates using the mean and standard deviation of
    Empirical Bayes rate estimates.

    The standardized rates resulting from this function are used to compute
    Moran's I corrected for rate variables.

    Parameters
    ----------
    e : array(n, 1)
        event variable measured at n spatial units
    b : array(n, 1)
        population at risk variable measured at n spatial units

    Notes
    -----
    e and b are arranged in the same order

    Returns
    -------
    : array (nx1)

    Examples
    --------
    >>> e = np.array([30, 25, 25, 15, 33, 21, 30, 20])
    >>> b = np.array([100, 100, 110, 90, 100, 90, 110, 90])
    >>> assuncao_rate(e, b)[:4]
    array([ 1.03843594, -0.04099089, -0.56250375, -1.73061861])
    """
    raw = e * 1.0 / b
    total_e, total_b = sum(e), sum(b)
    # Empirical Bayes prior mean and variance components.
    prior_mean = total_e * 1.0 / total_b
    prior_var = sum(b * ((raw - prior_mean) ** 2)) / total_b
    alpha_hat = prior_var - prior_mean / (float(total_b) / len(e))
    # Per-unit variance of the EB estimate; smaller populations get larger
    # variance, hence smaller standardized scores.
    var_i = alpha_hat + prior_mean / b
    return (raw - prior_mean) / np.sqrt(var_i)
class _Smoother(object):
    """
    Base class for aspatial smoothers.

    Implements behaviour shared by all smoothers; currently that is only the
    ``by_col`` dataframe helper.  Concrete subclasses are expected to expose
    their result through an ``r`` attribute set in ``__init__``.
    """
    def __init__(self):
        pass

    @classmethod
    def by_col(cls, df, e, b, inplace=False, **kwargs):
        """
        Compute smoothing by columns in a dataframe.

        Parameters
        -----------
        df : pandas.DataFrame
             a dataframe containing the data to be smoothed
        e : string or list of strings
            the name or names of columns containing event variables to be
            smoothed
        b : string or list of strings
            the name or names of columns containing the population
            variables to be smoothed
        inplace : bool
                  a flag denoting whether to output a copy of `df` with the
                  relevant smoothed columns appended, or to append the columns
                  directly to `df` itself.
        **kwargs: optional keyword arguments
                  optional keyword options that are passed directly to the
                  smoother.

        Returns
        ---------
        a copy of `df` containing the columns. Or, if `inplace`, this returns
        None, but implicitly adds columns to `df`.
        """
        if not inplace:
            new = df.copy()
            cls.by_col(new, e, b, inplace=True, **kwargs)
            return new
        # Normalize bare column names to lists.
        if isinstance(e, str):
            e = [e]
        if isinstance(b, str):
            b = [b]
        # A single population column may be shared across event columns.
        if len(b) == 1 and len(e) > 1:
            b = b * len(e)
        # Explicit validation: the original used ``assert``, which is
        # stripped under ``python -O``.
        if len(e) != len(b):
            raise ValueError('There is no one-to-one mapping between event'
                             ' variable and population at risk variable!')
        for ename, bname in zip(e, b):
            outcol = '_'.join(('-'.join((ename, bname)), cls.__name__.lower()))
            # e.g. column "deaths-pop_excess_risk" for Excess_Risk.
            df[outcol] = cls(df[ename], df[bname], **kwargs).r
class Excess_Risk(_Smoother):
    """Excess Risk

    Ratio of each unit's observed rate to the overall (study-area) rate.

    Parameters
    ----------
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units

    Attributes
    ----------
    r : array (n, 1)
        excess risk values
    """
    def __init__(self, e, b):
        events = np.asarray(e).reshape(-1, 1)
        pop = np.asarray(b).reshape(-1, 1)
        # Overall rate across the whole study area.
        overall = events.sum() * 1.0 / pop.sum()
        # Local rate relative to the overall rate; > 1 means excess risk.
        self.r = events * 1.0 / (pop * overall)
class Empirical_Bayes(_Smoother):
    """Aspatial Empirical Bayes Smoothing

    Shrinks each raw rate towards the global mean rate; units with small
    populations are pulled harder.

    Parameters
    ----------
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units

    Attributes
    ----------
    r : array (n, 1)
        rate values from Empirical Bayes Smoothing
    """
    def __init__(self, e, b):
        events = np.asarray(e).reshape(-1, 1)
        pop = np.asarray(b).reshape(-1, 1)
        e_total = events.sum() * 1.0
        b_total = pop.sum() * 1.0
        # Prior mean: the overall rate.
        prior_mean = e_total / b_total
        raw = events * 1.0 / pop
        # Method-of-moments estimate of the prior variance.
        spread = raw - prior_mean
        between = (pop * spread * spread).sum() * 1.0 / b_total
        correction = prior_mean * 1.0 / pop.mean()
        prior_var = between - correction
        # Shrinkage weight per unit: closer to 1 for large populations.
        weight = prior_var / (prior_var + prior_mean / pop)
        self.r = weight * raw + (1.0 - weight) * prior_mean
class _Spatial_Smoother(_Smoother):
    """
    Base class for spatial smoothers.

    Extends the aspatial ``by_col`` helper so that a spatial weights object
    (or one per event column) is resolved and passed to the smoother.
    """
    def __init__(self):
        pass

    @classmethod
    def by_col(cls, df, e, b, w=None, inplace=False, **kwargs):
        """
        Compute smoothing by columns in a dataframe.

        Parameters
        -----------
        df : pandas.DataFrame
             a dataframe containing the data to be smoothed
        e : string or list of strings
            the name or names of columns containing event variables to be
            smoothed
        b : string or list of strings
            the name or names of columns containing the population
            variables to be smoothed
        w : pysal.weights.W or list of pysal.weights.W
            the spatial weights object or objects to use with the
            event-population pairs. If not provided and a weights object
            is in the dataframe's metadata, that weights object will be
            used.
        inplace : bool
                  a flag denoting whether to output a copy of `df` with the
                  relevant smoothed columns appended, or to append the columns
                  directly to `df` itself.
        **kwargs: optional keyword arguments
                  optional keyword options that are passed directly to the
                  smoother.

        Returns
        ---------
        a copy of `df` containing the columns. Or, if `inplace`, this returns
        None, but implicitly adds columns to `df`.
        """
        if not inplace:
            new = df.copy()
            cls.by_col(new, e, b, w=w, inplace=True, **kwargs)
            return new
        if isinstance(e, str):
            e = [e]
        if isinstance(b, str):
            b = [b]
        if w is None:
            found = False
            for k in df._metadata:
                # BUG FIX: look up the metadata attribute name ``k``.  The
                # original called ``df.__dict__.get(w, None)`` with ``w`` still
                # None, so an attached weights object was never found.
                w = df.__dict__.get(k, None)
                if isinstance(w, W):
                    found = True
                    break
            if not found:
                raise Exception('Weights not provided and no weights attached to frame!'
                                ' Please provide a weight or attach a weight to the'
                                ' dataframe')
        if isinstance(w, W):
            w = [w] * len(e)
        if len(b) == 1 and len(e) > 1:
            b = b * len(e)
        # Explicit validation: ``assert`` would be stripped under -O.
        if len(e) != len(b):
            raise ValueError('There is no one-to-one mapping between event'
                             ' variable and population at risk variable!')
        for ei, bi, wi in zip(e, b, w):
            ename = ei
            bname = bi
            ei = df[ename]
            bi = df[bname]
            outcol = '_'.join(('-'.join((ename, bname)), cls.__name__.lower()))
            df[outcol] = cls(ei, bi, w=wi, **kwargs).r
class Spatial_Empirical_Bayes(_Spatial_Smoother):
    """Spatial Empirical Bayes Smoothing

    Like aspatial Empirical Bayes smoothing, but the prior mean and variance
    for each unit are estimated from its spatial neighborhood (the unit plus
    its neighbors) rather than from the whole study area.

    Parameters
    ----------
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units
    w : spatial weights instance

    Attributes
    ----------
    r : array (n, 1)
        rate values from Empirical Bayes Smoothing
    """
    def __init__(self, e, b, w):
        if not w.id_order_set:
            # BUG FIX (message only): "e an b" -> "e and b", consistent with
            # the sibling smoothers' messages.
            raise ValueError("w id_order must be set to align with the order of e and b")
        e = np.asarray(e).reshape(-1, 1)
        b = np.asarray(b).reshape(-1, 1)
        # Neighborhood rate (spatial lag smoother) as the local prior mean.
        r_mean = Spatial_Rate(e, b, w).r
        rate = e * 1.0 / b
        r_var_left = np.ones_like(e) * 1.
        ngh_num = np.ones_like(e)
        # Total population of each unit's neighborhood (neighbors + self).
        bi = slag(w, b) + b
        for i, idv in enumerate(w.id_order):
            # Neighborhood of unit i including itself, as array offsets.
            ngh = list(w[idv].keys()) + [idv]
            nghi = [w.id2i[k] for k in ngh]
            ngh_num[i] = len(nghi)
            # Population-weighted squared deviation of neighborhood rates
            # from the local prior mean.
            v = sum(np.square(rate[nghi] - r_mean[i]) * b[nghi])
            r_var_left[i] = v
        r_var_left = r_var_left / bi
        r_var_right = r_mean / (bi / ngh_num)
        # Local prior variance, floored at zero.
        r_var = r_var_left - r_var_right
        r_var[r_var < 0] = 0.0
        # Shrink the raw rate towards the neighborhood mean.
        self.r = r_mean + (rate - r_mean) * (r_var / (r_var + (r_mean / b)))
class Spatial_Rate(_Spatial_Smoother):
    """Spatial Rate Smoothing

    The smoothed rate for each unit is the ratio of total events to total
    population across the unit and its neighbors.

    Parameters
    ----------
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units
    w : spatial weights instance

    Attributes
    ----------
    r : array (n, 1)
        rate values from spatial rate smoothing
    """
    def __init__(self, e, b, w):
        if not w.id_order_set:
            raise ValueError("w id_order must be set to align with the order of e and b")
        events = np.asarray(e).reshape(-1, 1)
        pop = np.asarray(b).reshape(-1, 1)
        # Use binary weights so the lag is a plain neighbor sum, then
        # restore the original transform afterwards.
        w.transform = 'b'
        lag_e = slag(w, events)
        lag_b = slag(w, pop)
        self.r = (events + lag_e) / (pop + lag_b)
        w.transform = 'o'
class Kernel_Smoother(_Spatial_Smoother):
    """Kernel smoothing

    Smoothed rate is the ratio of kernel-weighted event and population sums.

    Parameters
    ----------
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units
    w : Kernel weights instance

    Attributes
    ----------
    r : array (n, 1)
        rate values from spatial rate smoothing
    """
    def __init__(self, e, b, w):
        # BUG FIX: the original raised ``Error``, which is undefined here, so
        # passing a non-Kernel weights object produced a NameError instead of
        # the intended message.  ``isinstance`` also accepts Kernel subclasses.
        if not isinstance(w, Kernel):
            raise ValueError('w must be an instance of Kernel weights')
        if not w.id_order_set:
            raise ValueError("w id_order must be set to align with the order of e and b")
        e = np.asarray(e).reshape(-1, 1)
        b = np.asarray(b).reshape(-1, 1)
        # Kernel-weighted sums of events and populations; their ratio is
        # the smoothed rate.
        w_e, w_b = slag(w, e), slag(w, b)
        self.r = w_e / w_b
class Age_Adjusted_Smoother(_Spatial_Smoother):
    """Age-adjusted rate smoothing

    Spatially lags each age group's events and populations (binary weights),
    then applies direct age standardization to the lagged totals.

    Parameters
    ----------
    e : array (n*h, 1)
        event variable measured for each age group across n spatial units
    b : array (n*h, 1)
        population at risk variable measured for each age group across n spatial units
    w : spatial weights instance
    s : array (n*h, 1)
        standard population for each age group across n spatial units

    Attributes
    ----------
    r : array (n, 1)
        rate values from spatial rate smoothing

    Notes
    -----
    Weights used to smooth age-specific events and populations are simple
    binary weights.
    """
    def __init__(self, e, b, w, s, alpha=0.05):
        e = np.asarray(e).reshape(-1, 1)
        b = np.asarray(b).reshape(-1, 1)
        s = np.asarray(s).flatten()
        t = len(e)
        h = t // w.n  # number of age groups per spatial unit
        w.transform = 'b'
        e_n, b_n = [], []
        # Lag each age group separately (step ``h`` slices pick one age
        # group across all units).
        for i in range(h):
            e_n.append(slag(w, e[i::h]).tolist())
            b_n.append(slag(w, b[i::h]).tolist())
        # Interleave the per-age-group lags back into (unit, age) order.
        e_n = np.array(e_n).reshape((1, t), order='F')[0]
        b_n = np.array(b_n).reshape((1, t), order='F')[0]
        e_n = e_n.reshape(s.shape)
        b_n = b_n.reshape(s.shape)
        r = direct_age_standardization(e_n, b_n, s, w.n, alpha=alpha)
        self.r = np.array([i[0] for i in r])
        w.transform = 'o'

    # NOTE(review): the decorator order (_requires applied on top of the
    # classmethod object) is preserved from the original; confirm _requires
    # handles classmethod objects before reordering.
    @_requires('pandas')
    @classmethod
    def by_col(cls, df, e, b, w=None, s=None, **kwargs):
        """
        Compute smoothing by columns in a dataframe.

        Parameters
        -----------
        df : pandas.DataFrame
             a dataframe containing the data to be smoothed
        e : string or list of strings
            the name or names of columns containing event variables to be
            smoothed
        b : string or list of strings
            the name or names of columns containing the population
            variables to be smoothed
        w : pysal.weights.W or list of pysal.weights.W
            the spatial weights object or objects to use with the
            event-population pairs. If not provided and a weights object
            is in the dataframe's metadata, that weights object will be
            used.
        s : string or list of strings
            the name or names of columns to use as a standard population
            variable for the events `e` and at-risk populations `b`.
        **kwargs: optional keyword arguments
                  optional keyword options that are passed directly to the
                  smoother.

        Returns
        ---------
        a new DataFrame containing one smoothed column per event/population
        pair.
        """
        if s is None:
            raise Exception('Standard population variable "s" must be supplied.')
        import pandas as pd
        if isinstance(e, str):
            e = [e]
        if isinstance(b, str):
            b = [b]
        if isinstance(s, str):
            s = [s]
        if w is None:
            found = False
            for k in df._metadata:
                # BUG FIX: look up the metadata name ``k``; the original used
                # ``w`` (None here), so attached weights were never found.
                w = df.__dict__.get(k, None)
                if isinstance(w, W):
                    found = True
                    break
            if not found:
                raise Exception('Weights not provided and no weights attached to frame!'
                                ' Please provide a weight or attach a weight to the'
                                ' dataframe.')
        if isinstance(w, W):
            w = [w] * len(e)
        if not all(isinstance(wi, W) for wi in w):
            raise Exception('Weights object must be an instance of '
                            ' libpysal.weights.W!')
        b = b * len(e) if len(b) == 1 and len(e) > 1 else b
        s = s * len(e) if len(s) == 1 and len(e) > 1 else s
        # Explicit validation: ``assert`` would be stripped under -O.
        if not (len(e) == len(b) == len(s) == len(w)):
            raise ValueError('There is no one-to-one mapping between event'
                             ' variable and population at risk variable, and '
                             ' standard population variable, and spatial '
                             ' weights!')
        rdf = []
        max_len = 0
        for ei, bi, wi, si in zip(e, b, w, s):
            # (The original also computed ``h = len(ei) // wi.n`` here, which
            # took the length of the column *name* and was never used.)
            outcol = '_'.join(('-'.join((ei, bi)), cls.__name__.lower()))
            this_r = cls(df[ei], df[bi], w=wi, s=df[si], **kwargs).r
            # BUG FIX: track the longest result; the original reset max_len
            # to 0 whenever a longer result appeared.
            max_len = max(max_len, len(this_r))
            rdf.append((outcol, this_r.tolist()))
        # Pad every column to the common length before building the frame.
        padded = ((name, vals + [None] * (max_len - len(vals)))
                  for name, vals in rdf)
        # BUG FIX: ``pd.DataFrame.from_items`` was removed from pandas; a
        # dict of column -> values (insertion-ordered) is equivalent.
        return pd.DataFrame(dict(padded))
class Disk_Smoother(_Spatial_Smoother):
    """Locally weighted averages or disk smoothing

    Each unit's value is the average of the raw rates over its neighborhood,
    weighted by the spatial weights.

    Parameters
    ----------
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units
    w : spatial weights matrix

    Attributes
    ----------
    r : array (n, 1)
        rate values from disk smoothing
    """
    def __init__(self, e, b, w):
        if not w.id_order_set:
            raise ValueError("w id_order must be set to align with the order of e and b")
        events = np.asarray(e).reshape(-1, 1)
        pop = np.asarray(b).reshape(-1, 1)
        rates = events * 1.0 / pop
        # Sum of weights in each unit's neighborhood, in id order, used to
        # turn the weighted lag into a weighted average.
        totals = np.array([sum(w.weights[key]) for key in w.id_order])
        self.r = slag(w, rates) / totals.reshape(-1, 1)
class Spatial_Median_Rate(_Spatial_Smoother):
"""Spatial Median Rate Smoothing
Parameters
----------
e : array (n, 1)
event variable measured across n spatial units
b : array (n, 1)
population at risk variable measured across n spatial units
w : spatial weights instance
aw : array (n, 1)
auxiliary weight variable measured across n spatial units
iteration : integer
the number of interations
Attributes
----------
r : array (n, 1)
rate values from spatial median rate smoothing
w : spatial weights instance
aw : array (n, 1)
auxiliary weight variable measured across n spatial units
Examples
--------
Reading data in stl_hom.csv into stl to extract values
for event and population-at-risk variables
>>> import libpysal
>>> stl = libpysal.io.open(libpysal.examples.get_path('stl_hom.csv'), 'r')
The 11th and 14th columns in stl_hom.csv includes the number of homocides and population.
Creating two arrays from these columns.
>>> stl_e, stl_b = np.array(stl[:,10]), np.array(stl[:,13])
Creating a spatial weights instance by reading in stl.gal file.
>>> stl_w = libpysal.io.open(libpysal.examples.get_path('stl.gal'), 'r').read()
Ensuring that the elements in the spatial weights instance are ordered
by the given sequential numbers from 1 to the number of observations in stl_hom.csv
>>> if not stl_w.id_order_set: stl_w.id_order = range(1,len(stl) + 1)
Computing spatial median rates without iteration
>>> smr0 = Spatial_Median_Rate(stl_e,stl_b,stl_w)
Extracting the computed rates through the property r of the Spatial_Median_Rate instance
>>> smr0.r[:10]
array([3.96047383e-05, 3.55386859e-05, 3.28308921e-05, 4.30731238e-05,
3.12453969e-05, 1.97300409e-05, 3.10159267e-05, 2.19279204e-05,
2.93763432e-05, 2.93763432e-05])
Recomputing spatial median rates with 5 iterations
>>> smr1 = Spatial_Median_Rate(stl_e,stl_b,stl_w,iteration=5)
Extracting the computed rates through the property r of the Spatial_Median_Rate instance
>>> smr1.r[:10]
array([3.11293620e-05, 2.95956330e-05, 3.11293620e-05, 3.10159267e-05,
2.98436066e-05, 2.76406686e-05, 3.10159267e-05, 2.94788171e-05,
2.99460806e-05, 2.96981070e-05])
Computing spatial median rates by using the base variable as auxilliary weights
without iteration
>>> smr2 = Spatial_Median_Rate(stl_e,stl_b,stl_w,aw=stl_b)
Extracting the computed rates through the property r of the Spatial_Median_Rate instance
>>> smr2.r[:10]
array([5.77412020e-05, 4.46449551e-05, 5.77412020e-05, 5.77412020e-05,
4.46449551e-05, 3.61363528e-05, 3.61363528e-05, 4.46449551e-05,
5.77412020e-05, 4.03987355e-05])
Recomputing spatial median rates by using the base variable as auxilliary weights
with 5 iterations
>>> smr3 = Spatial_Median_Rate(stl_e,stl_b,stl_w,aw=stl_b,iteration=5)
Extracting the computed rates through the property r of the Spatial_Median_Rate instance
>>> smr3.r[:10]
array([3.61363528e-05, 4.46449551e-05, 3.61363528e-05, 3.61363528e-05,
4.46449551e-05, 3.61363528e-05, 3.61363528e-05, 4.46449551e-05,
3.61363528e-05, 4.46449551e-05])
>>>
"""
def __init__(self, e, b, w, aw=None, iteration=1):
    """Initialize the smoother and immediately run ``iteration`` passes.

    Parameters
    ----------
    e : array-like
        event counts, one per spatial unit
    b : array-like
        population at risk, one per spatial unit (same order as ``e``)
    w : spatial weights instance
        ``id_order`` must already be set so neighbors align with e/b
    aw : array-like, optional
        auxiliary weights; when given, weighted medians are used
    iteration : int
        number of median-smoothing passes to apply
    """
    if not w.id_order_set:
        raise ValueError("w id_order must be set to align with the order of e and b")
    events = np.asarray(e).flatten()
    population = np.asarray(b).flatten()
    # crude rates; refined in place by each smoothing pass
    self.r = events * 1.0 / population
    self.aw = aw
    self.w = w
    remaining = iteration
    while remaining:
        self.__search_median()
        remaining -= 1
def __search_median(self):
    # One smoothing pass: replace every unit's rate with the (optionally
    # weighted) median of its own rate and its neighbors' rates.
    r, aw, w = self.r, self.aw, self.w
    new_r = []
    if self.aw is None:
        for i, id in enumerate(w.id_order):
            # rates over the "disk": unit i plus its neighbors, addressed
            # by positional offsets into r
            r_disk = np.append(r[i], r[w.neighbor_offsets[id]])
            new_r.append(np.median(r_disk))
    else:
        for i, id in enumerate(w.id_order):
            # same disk, but take the weighted median using the auxiliary
            # weights aw instead of a plain median
            id_d = [i] + list(w.neighbor_offsets[id])
            aw_d, r_d = aw[id_d], r[id_d]
            new_r.append(weighted_median(r_d, aw_d))
    # preserve the original array shape for the next iteration
    self.r = np.asarray(new_r).reshape(r.shape)
class Spatial_Filtering(_Smoother):
    """Spatial Filtering

    Parameters
    ----------
    bbox : a list of two lists where each list is a pair of coordinates
           a bounding box for the entire n spatial units
    data : array (n, 2)
           x, y coordinates
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units
    x_grid : integer
             the number of cells on x axis
    y_grid : integer
             the number of cells on y axis
    r : float
        fixed radius of a moving window
    pop : integer
          population threshold to create adaptive moving windows

    Attributes
    ----------
    grid : array (x_grid*y_grid, 2)
           x, y coordinates for grid points
    r : array (x_grid*y_grid, 1)
        rate values for grid points

    Notes
    -----
    No tool is provided to find an optimal value for r or pop.

    Examples
    --------
    Reading data in stl_hom.csv into stl to extract values
    for event and population-at-risk variables

    >>> import libpysal
    >>> stl = libpysal.io.open(libpysal.examples.get_path('stl_hom.csv'), 'r')

    Reading the stl data in the WKT format so that
    we can easily extract polygon centroids

    >>> from libpysal.io.util.wkt import WKTParser
    >>> fromWKT = WKTParser()
    >>> stl.cast('WKT',fromWKT)

    Extracting polygon centroids through iteration

    >>> d = np.array([i.centroid for i in stl[:,0]])

    Specifying the bounding box for the stl_hom data.
    The bbox should include two points for the left-bottom and the right-top
    corners.

    >>> bbox = [[-92.700676, 36.881809], [-87.916573, 40.3295669]]

    The 11th and 14th columns in stl_hom.csv include the number of homicides
    and population. Creating two arrays from these columns.

    >>> stl_e, stl_b = np.array(stl[:,10]), np.array(stl[:,13])

    Applying spatial filtering by using a 10*10 mesh grid and a moving window
    with 2 radius

    >>> sf_0 = Spatial_Filtering(bbox,d,stl_e,stl_b,10,10,r=2)
    >>> sf_0.r[:10]
    array([4.23561763e-05, 4.45290850e-05, 4.56456221e-05, 4.49133384e-05,
           4.39671835e-05, 4.44903042e-05, 4.19845497e-05, 4.11936548e-05,
           3.93463504e-05, 4.04376345e-05])

    Applying another spatial filtering by allowing the moving window to grow
    until 600000 people are found in the window

    >>> sf = Spatial_Filtering(bbox,d,stl_e,stl_b,10,10,pop=600000)

    Checking the size of the resulting array including the rates

    >>> sf.r.shape
    (100,)
    >>> sf.r[:10]
    array([3.73728738e-05, 4.04456300e-05, 4.04456300e-05, 3.81035327e-05,
           4.54831940e-05, 4.54831940e-05, 3.75658628e-05, 3.75658628e-05,
           3.75658628e-05, 3.75658628e-05])
    """

    def __init__(self, bbox, data, e, b, x_grid, y_grid, r=None, pop=None):
        # validate the window specification up front, before any work is done
        if r is None and pop is None:
            raise ValueError("Either r or pop should not be None")
        e = np.asarray(e).reshape(-1, 1)
        b = np.asarray(b).reshape(-1, 1)
        data_tree = KDTree(data)
        x_range = bbox[1][0] - bbox[0][0]
        y_range = bbox[1][1] - bbox[0][1]
        # regular x_grid-by-y_grid lattice spanning the bounding box
        x, y = np.mgrid[bbox[0][0]:bbox[1][0]:float(x_range) / x_grid,
                        bbox[0][1]:bbox[1][1]:float(y_range) / y_grid]
        self.grid = list(zip(x.ravel(), y.ravel()))
        self.r = []
        if r is not None:
            # fixed-radius windows: pool events and population of every
            # observation within distance r of each grid point
            pnts_in_disk = data_tree.query_ball_point(self.grid, r=r)
            for in_disk in pnts_in_disk:
                self.r.append(e[in_disk].sum() * 1.0 / b[in_disk].sum())
        if pop is not None:
            # adaptive windows: take observations nearest-first until the
            # cumulative population reaches the pop threshold
            half_nearest_pnts = data_tree.query(self.grid, k=len(e))[1]
            for nearest in half_nearest_pnts:
                e_n, b_n = e[nearest].cumsum(), b[nearest].cumsum()
                b_n_filter = b_n <= pop
                e_n_f, b_n_f = e_n[b_n_filter], b_n[b_n_filter]
                if len(e_n_f) == 0:
                    # threshold below the nearest unit's population:
                    # fall back to a single-observation window
                    e_n_f = e_n[[0]]
                    b_n_f = b_n[[0]]
                self.r.append(e_n_f[-1] * 1.0 / b_n_f[-1])
        self.r = np.array(self.r)

    @_requires('pandas')
    @classmethod
    def by_col(cls, df, e, b, x_grid, y_grid, geom_col='geometry', **kwargs):
        """
        Compute smoothing by columns in a dataframe. The bounding box and point
        information is computed from the geometry column.

        Parameters
        -----------
        df : pandas.DataFrame
             a dataframe containing the data to be smoothed
        e : string or list of strings
            the name or names of columns containing event variables to be
            smoothed
        b : string or list of strings
            the name or names of columns containing the population
            variables to be smoothed
        x_grid : integer
                 number of grid cells to use along the x-axis
        y_grid : integer
                 number of grid cells to use along the y-axis
        geom_col: string
                  the name of the column in the dataframe containing the
                  geometry information.
        **kwargs: optional keyword arguments
                  optional keyword options that are passed directly to the
                  smoother.

        Returns
        ---------
        a new dataframe of dimension (x_grid*y_grid, 3), containing the
        coordinates of the grid cells and the rates associated with those grid
        cells.
        """
        import pandas as pd
        # prep for application over multiple event/population pairs
        if isinstance(e, str):
            e = [e]
        if isinstance(b, str):
            b = [b]
        if len(e) > len(b):
            b = b * len(e)
        if isinstance(x_grid, (int, float)):
            x_grid = [x_grid] * len(e)
        if isinstance(y_grid, (int, float)):
            y_grid = [y_grid] * len(e)
        bbox = get_bounding_box(df[geom_col])
        bbox = [[bbox.left, bbox.lower], [bbox.right, bbox.upper]]
        data = get_points_array(df[geom_col])
        res = []
        for ename, bname, xgi, ygi in zip(e, b, x_grid, y_grid):
            r = cls(bbox, data, df[ename], df[bname], xgi, ygi, **kwargs)
            grid = np.asarray(r.grid).reshape(-1, 2)
            name = '_'.join(('-'.join((ename, bname)), cls.__name__.lower()))
            colnames = ['_'.join((name, suffix)) for suffix in ('X', 'Y', 'R')]
            # pd.DataFrame.from_items was removed in pandas 1.0; build the
            # frame from an (insertion-ordered) dict and pin column order.
            cols = dict(zip(colnames, [grid[:, 0], grid[:, 1], r.r]))
            res.append(pd.DataFrame(cols, columns=colnames))
        outdf = pd.concat(res)
        return outdf
class Headbanging_Triples(object):
    """Generate a pseudo spatial weights instance that contains headbanging triples

    .. deprecated::
       The constructor raises ``DeprecationWarning`` unconditionally, so this
       class cannot be instantiated; the body below is retained for reference.

    Parameters
    ----------
    data : array (n, 2)
           numpy array of x, y coordinates
    w : spatial weights instance
    k : integer
        number of nearest neighbors
    t : integer
        the number of triples
    angle : integer between 0 and 180
            the angle criterion for a set of triples
    edgecor : boolean
              whether or not correction for edge points is made

    Attributes
    ----------
    triples : dictionary
              key is observation record id, value is a list of lists
              of triple ids
    extra : dictionary
            key is observation record id, value is a list of the following:
            tuple of original triple observations,
            distance between original triple observations,
            distance between an original triple observation and its
            extrapolated point

    Examples
    --------
    importing k-nearest neighbor weights creator

    >>> import libpysal # doctest: +SKIP

    Reading data in stl_hom.csv into stl_db to extract values
    for event and population-at-risk variables

    >>> stl_db = libpysal.io.open(libpysal.examples.get_path('stl_hom.csv'),'r') # doctest: +SKIP

    Reading the stl data in the WKT format so that
    we can easily extract polygon centroids

    >>> from libpysal.io.util.wkt import WKTParser # doctest: +SKIP
    >>> fromWKT = WKTParser() # doctest: +SKIP
    >>> stl_db.cast('WKT',fromWKT) # doctest: +SKIP

    Extracting polygon centroids through iteration

    >>> d = np.array([i.centroid for i in stl_db[:,0]]) # doctest: +SKIP

    Using the centroids, we create a 5-nearest neighbor weights

    >>> w = libpysal.weights.KNN(d,k=5) # doctest: +SKIP

    Ensuring that the elements in the spatial weights instance are ordered
    by the order of stl_db's IDs

    >>> if not w.id_order_set: w.id_order = w.id_order # doctest: +SKIP

    Finding headbanging triples by using 5 nearest neighbors

    >>> ht = Headbanging_Triples(d,w,k=5) # doctest: +SKIP

    Checking the members of triples

    >>> for k, item in ht.triples.items()[:5]: print(k, item) # doctest: +SKIP
    0 [(5, 6), (10, 6)]
    1 [(4, 7), (4, 14), (9, 7)]
    2 [(0, 8), (10, 3), (0, 6)]
    3 [(4, 2), (2, 12), (8, 4)]
    4 [(8, 1), (12, 1), (8, 9)]

    Opening sids2.shp file

    >>> import libpysal
    >>> sids = libpysal.io.open(libpysal.examples.get_path('sids2.shp'),'r') # doctest: +SKIP

    Extracting the centroids of polygons in the sids data

    >>> sids_d = np.array([i.centroid for i in sids]) # doctest: +SKIP

    Creating a 5-nearest neighbors weights from the sids centroids

    >>> sids_w = libpysal.weights.KNN(sids_d,k=5) # doctest: +SKIP

    Ensuring that the members in sids_w are ordered by
    the order of sids_d's ID

    >>> if not sids_w.id_order_set: sids_w.id_order = sids_w.id_order # doctest: +SKIP

    Finding headbanging triples by using 5 nearest neighbors

    >>> s_ht = Headbanging_Triples(sids_d,sids_w,k=5) # doctest: +SKIP

    Checking the members of the found triples

    >>> for k, item in s_ht.triples.items()[:5]: print(k, item) # doctest: +SKIP
    0 [(1, 18), (1, 21), (1, 33)]
    1 [(2, 40), (2, 22), (22, 40)]
    2 [(39, 22), (1, 9), (39, 17)]
    3 [(16, 6), (19, 6), (20, 6)]
    4 [(5, 15), (27, 15), (35, 15)]

    Finding headbanging triples by using 5 nearest neighbors with edge
    correction

    >>> s_ht2 = Headbanging_Triples(sids_d,sids_w,k=5,edgecor=True) # doctest: +SKIP

    Checking the members of the found triples

    >>> for k, item in s_ht2.triples.items()[:5]: print(k, item) # doctest: +SKIP
    0 [(1, 18), (1, 21), (1, 33)]
    1 [(2, 40), (2, 22), (22, 40)]
    2 [(39, 22), (1, 9), (39, 17)]
    3 [(16, 6), (19, 6), (20, 6)]
    4 [(5, 15), (27, 15), (35, 15)]

    Checking the extrapolated point that is introduced into the triples
    during edge correction

    >>> extrapolated = s_ht2.extra[72] # doctest: +SKIP

    Checking the observation IDs constituting the extrapolated triple

    >>> extrapolated[0] # doctest: +SKIP
    (89, 77)

    Checking the distances between the extrapolated point and the
    observations 89 and 77

    >>> round(extrapolated[1],5), round(extrapolated[2],6) # doctest: +SKIP
    (0.33753, 0.302707)
    """
    def __init__(self, data, w, k=5, t=3, angle=135.0, edgecor=False):
        # NOTE(review): this raise makes everything below unreachable —
        # the class is deliberately disabled.
        raise DeprecationWarning('Deprecated')
        if k < 3:
            raise ValueError("w should be NeareastNeighbors instance & the number of neighbors should be more than 3.")
        if not w.id_order_set:
            raise ValueError("w id_order must be set to align with the order of data")
        # points maps (id, Point) -> {neighbor offset: neighbor Point}
        self.triples, points = {}, {}
        for i, pnt in enumerate(data):
            ng = w.neighbor_offsets[i]
            points[(i, Point(pnt))] = dict(list(zip(ng, [Point(d)
                                                         for d in data[ng]])))
        for i, pnt in list(points.keys()):
            ng = points[(i, pnt)]
            tr, tr_dis = {}, []
            # consider every pair of neighbors; keep the pair as a candidate
            # triple when the angle it subtends at pnt exceeds the criterion
            for c in comb(list(ng.keys()), 2):
                p2, p3 = ng[c[0]], ng[c[-1]]
                ang = get_angle_between(Ray(pnt, p2), Ray(pnt, p3))
                if ang > angle or (ang < 0.0 and ang + 360 > angle):
                    tr[tuple(c)] = (p2, p3)
            if len(tr) > t:
                # more candidates than wanted: keep the t triples whose
                # connecting segment lies closest to pnt
                for c in list(tr.keys()):
                    p2, p3 = tr[c]
                    tr_dis.append((get_segment_point_dist(
                        LineSegment(p2, p3), pnt), c))
                tr_dis = sorted(tr_dis)[:t]
                self.triples[i] = [trp for dis, trp in tr_dis]
            else:
                self.triples[i] = list(tr.keys())
        if edgecor:
            # Edge correction: hull points with no triples get one synthetic
            # triple built from an extrapolated point.
            self.extra = {}
            ps = dict([(p, i) for i, p in list(points.keys())])
            chull = convex_hull(list(ps.keys()))
            chull = [p for p in chull if len(self.triples[ps[p]]) == 0]
            for point in chull:
                key = (ps[point], point)
                ng = points[key]
                ng_dist = [(get_points_dist(point, p), p) for p in list(ng.values())]
                # try neighbors nearest-first (pop() from a descending sort)
                ng_dist_s = sorted(ng_dist, reverse=True)
                extra = None
                while extra is None and len(ng_dist_s) > 0:
                    p2 = ng_dist_s.pop()[-1]
                    p3s = list(ng.values())
                    p3s.remove(p2)
                    for p3 in p3s:
                        dist_p2_p3 = get_points_dist(p2, p3)
                        dist_p_p2 = get_points_dist(point, p2)
                        dist_p_p3 = get_points_dist(point, p3)
                        # anchor the rays at whichever of p2/p3 is closer
                        if dist_p_p2 <= dist_p_p3:
                            ray1, ray2, s_pnt, dist, c = Ray(p2, point), Ray(p2, p3), p2, dist_p_p2, (ps[p2], ps[p3])
                        else:
                            ray1, ray2, s_pnt, dist, c = Ray(p3, point), Ray(p3, p2), p3, dist_p_p3, (ps[p3], ps[p2])
                        ang = get_angle_between(ray1, ray2)
                        if ang >= 90 + angle / 2 or (ang < 0 and ang + 360 >= 90 + angle / 2):
                            ex_point = get_point_at_angle_and_dist(
                                ray1, angle, dist)
                            extra = [c, dist_p2_p3, get_points_dist(
                                s_pnt, ex_point)]
                            break
                # NOTE(review): if no qualifying pair is found, extra stays
                # None and the next line raises TypeError — confirm intended.
                self.triples[ps[point]].append(extra[0])
                self.extra[ps[point]] = extra
class Headbanging_Median_Rate(object):
    """Headbanging Median Rate Smoothing

    .. deprecated::
       The constructor raises ``DeprecationWarning`` unconditionally, so this
       class cannot be instantiated; the body below is retained for reference.

    Parameters
    ----------
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units
    t : Headbanging_Triples instance
    aw : array (n, 1)
         auxiliary weight variable measured across n spatial units
    iteration : integer
                the number of iterations

    Attributes
    ----------
    r : array (n, 1)
        rate values from headbanging median smoothing

    Examples
    --------
    >>> import libpysal # doctest: +SKIP

    opening the sids2 shapefile

    >>> sids = libpysal.io.open(libpysal.examples.get_path('sids2.shp'), 'r') # doctest: +SKIP

    extracting the centroids of polygons in the sids2 data

    >>> sids_d = np.array([i.centroid for i in sids]) # doctest: +SKIP

    creating a 5-nearest neighbors weights from the centroids

    >>> sids_w = libpysal.weights.KNN(sids_d,k=5) # doctest: +SKIP

    ensuring that the members in sids_w are ordered

    >>> if not sids_w.id_order_set: sids_w.id_order = sids_w.id_order # doctest: +SKIP

    finding headbanging triples by using 5 neighbors

    >>> s_ht = Headbanging_Triples(sids_d,sids_w,k=5) # doctest: +SKIP

    reading in the sids2 data table

    >>> sids_db = libpysal.io.open(libpysal.examples.get_path('sids2.dbf'), 'r') # doctest: +SKIP

    extracting the 10th and 9th columns in the sids2.dbf and
    using data values as event and population-at-risk variables

    >>> s_e, s_b = np.array(sids_db[:,9]), np.array(sids_db[:,8]) # doctest: +SKIP

    computing headbanging median rates from s_e, s_b, and s_ht

    >>> sids_hb_r = Headbanging_Median_Rate(s_e,s_b,s_ht) # doctest: +SKIP
    >>> sids_hb_r.r[:5] # doctest: +SKIP
    array([ 0.00075586,  0.        ,  0.0008285 ,  0.0018315 ,  0.00498891])

    recomputing headbanging median rates with 5 iterations

    >>> sids_hb_r2 = Headbanging_Median_Rate(s_e,s_b,s_ht,iteration=5) # doctest: +SKIP
    >>> sids_hb_r2.r[:5] # doctest: +SKIP
    array([ 0.0008285 ,  0.00084331,  0.00086896,  0.0018315 ,  0.00498891])

    recomputing headbanging median rates by considering a set of auxiliary
    weights

    >>> sids_hb_r3 = Headbanging_Median_Rate(s_e,s_b,s_ht,aw=s_b) # doctest: +SKIP
    >>> sids_hb_r3.r[:5] # doctest: +SKIP
    array([ 0.00091659,  0.        ,  0.00156838,  0.0018315 ,  0.00498891])
    """
    def __init__(self, e, b, t, aw=None, iteration=1):
        # NOTE(review): this raise makes everything below unreachable —
        # the class is deliberately disabled.
        raise DeprecationWarning('Deprecated')
        self.r = e * 1.0 / b
        self.tr, self.aw = t.triples, aw
        if hasattr(t, 'extra'):
            self.extra = t.extra
        while iteration:
            self.__search_headbanging_median()
            iteration -= 1

    def __get_screens(self, id, triples, weighted=False):
        # Build the screening statistics for unit ``id``: its own rate plus a
        # low and a high screen derived from its triples (and, when weighted,
        # the auxiliary weights needed to arbitrate between them).
        r, tr = self.r, self.tr
        if len(triples) == 0:
            return r[id]
        if hasattr(self, 'extra') and id in self.extra:
            # edge-corrected unit: a single triple whose last member is an
            # extrapolated point
            extra = self.extra
            trp_r = r[list(triples[0])]
            # observed rate
            # plus difference in rate scaled by ratio of extrapolated distance
            # & observed distance.
            trp_r[-1] = trp_r[0] + (trp_r[0] - trp_r[-1]) * (
                extra[id][-1] * 1.0 / extra[id][1])
            trp_r = sorted(trp_r)
            if not weighted:
                return r[id], trp_r[0], trp_r[-1]
            else:
                trp_aw = self.aw[triples[0]]
                # extrapolate the auxiliary weight the same way as the rate
                extra_w = trp_aw[0] + (trp_aw[0] - trp_aw[-
                                                          1]) * (extra[id][-1] * 1.0 / extra[id][1])
                return r[id], trp_r[0], trp_r[-1], self.aw[id], trp_aw[0] + extra_w
        if not weighted:
            # screens = medians of the per-triple minima and maxima
            lowest, highest = [], []
            for trp in triples:
                trp_r = np.sort(r[list(trp)])
                lowest.append(trp_r[0])
                highest.append(trp_r[-1])
            return r[id], np.median(np.array(lowest)), np.median(np.array(highest))
        if weighted:
            # weighted variant: track which member supplied each extreme so
            # its auxiliary weight can feed the weighted medians
            lowest, highest = [], []
            lowest_aw, highest_aw = [], []
            for trp in triples:
                trp_r = r[list(trp)]
                dtype = [('r', '%s' % trp_r.dtype), ('w',
                                                     '%s' % self.aw.dtype)]
                trp_r = np.array(list(zip(trp_r, list(trp))), dtype=dtype)
                trp_r.sort(order='r')
                lowest.append(trp_r['r'][0])
                highest.append(trp_r['r'][-1])
                lowest_aw.append(self.aw[int(round(trp_r['w'][0]))])
                highest_aw.append(self.aw[int(round(trp_r['w'][-1]))])
            wm_lowest = weighted_median(np.array(lowest), np.array(lowest_aw))
            wm_highest = weighted_median(
                np.array(highest), np.array(highest_aw))
            triple_members = flatten(triples, unique=False)
            return r[id], wm_lowest, wm_highest, self.aw[id] * len(triples), self.aw[triple_members].sum()

    def __get_median_from_screens(self, screens):
        # Resolve the screening tuple into the smoothed rate:
        # a bare float means "no triples, keep the rate";
        # a 3-tuple is (rate, low, high) -> plain median;
        # a 5-tuple additionally carries weights that decide whether the
        # rate may be pulled to a screen.
        if isinstance(screens, float):
            return screens
        elif len(screens) == 3:
            return np.median(np.array(screens))
        elif len(screens) == 5:
            rk, wm_lowest, wm_highest, w1, w2 = screens
            if rk >= wm_lowest and rk <= wm_highest:
                return rk
            elif rk < wm_lowest and w1 < w2:
                return wm_lowest
            elif rk > wm_highest and w1 < w2:
                return wm_highest
            else:
                return rk

    def __search_headbanging_median(self):
        # One smoothing pass over every unit that has triples recorded.
        r, tr = self.r, self.tr
        new_r = []
        for k in list(tr.keys()):
            screens = self.__get_screens(
                k, tr[k], weighted=(self.aw is not None))
            new_r.append(self.__get_median_from_screens(screens))
        self.r = np.array(new_r)

    @_requires('pandas')
    @classmethod
    def by_col(cls, df, e, b, t=None, geom_col='geometry', inplace=False, **kwargs):
        """
        Compute smoothing by columns in a dataframe. The bounding box and point
        information is computed from the geometry column.

        Parameters
        -----------
        df : pandas.DataFrame
             a dataframe containing the data to be smoothed
        e : string or list of strings
            the name or names of columns containing event variables to be
            smoothed
        b : string or list of strings
            the name or names of columns containing the population
            variables to be smoothed
        t : Headbanging_Triples instance or list of Headbanging_Triples
            list of headbanging triples instances. If not provided, this
            is computed from the geometry column of the dataframe.
        geom_col: string
                  the name of the column in the dataframe containing the
                  geometry information.
        inplace : bool
                  a flag denoting whether to output a copy of `df` with the
                  relevant smoothed columns appended, or to append the columns
                  directly to `df` itself.
        **kwargs: optional keyword arguments
                  optional keyword options that are passed directly to the
                  smoother.

        Returns
        ---------
        a new dataframe containing the smoothed Headbanging Median Rates for the
        event/population pairs. If done inplace, there is no return value and
        `df` is modified in place.
        """
        import pandas as pd
        if not inplace:
            # work on a copy and recurse with inplace=True
            new = df.copy()
            cls.by_col(new, e, b, t=t, geom_col=geom_col, inplace=True, **kwargs)
            return new
        import pandas as pd
        # prep for application over multiple event/population pairs
        if isinstance(e, str):
            e = [e]
        if isinstance(b, str):
            b = [b]
        if len(e) > len(b):
            b = b * len(e)
        data = get_points_array(df[geom_col])
        #Headbanging_Triples doesn't take **kwargs, so filter its arguments
        # (self, data, w, k=5, t=3, angle=135.0, edgecor=False):
        w = kwargs.pop('w', None)
        if w is None:
            found = False
            for k in df._metadata:
                # NOTE(review): ``w`` is None here, so this lookup can never
                # find an attached weights object — presumably this should be
                # df.__dict__.get(k, None); confirm against intended behavior.
                w = df.__dict__.get(w, None)
                if isinstance(w, W):
                    found = True
            if not found:
                raise Exception('Weights not provided and no weights attached to frame!'
                                ' Please provide a weight or attach a weight to the'
                                ' dataframe')
        k = kwargs.pop('k', 5)
        t = kwargs.pop('t', 3)
        angle = kwargs.pop('angle', 135.0)
        edgecor = kwargs.pop('edgecor', False)
        hbt = Headbanging_Triples(data, w, k=k, t=t, angle=angle,
                                  edgecor=edgecor)
        res = []
        for ename, bname in zip(e, b):
            r = cls(df[ename], df[bname], hbt, **kwargs).r
            name = '_'.join(('-'.join((ename, bname)), cls.__name__.lower()))
            df[name] = r
|
def assuncao_rate(e, b):
    """Assuncao-Reis standardized rates.

    The rates are standardized using the mean and standard deviation implied
    by Empirical Bayes rate estimates; the standardized rates are used to
    compute Moran's I corrected for rate variables.

    Parameters
    ----------
    e : array (n, 1)
        event variable measured at n spatial units
    b : array (n, 1)
        population at risk variable measured at n spatial units

    Notes
    -----
    e and b are arranged in the same order.

    Returns
    -------
    array (n, 1)

    Examples
    --------
    >>> e = np.array([30, 25, 25, 15, 33, 21, 30, 20])
    >>> b = np.array([100, 100, 110, 90, 100, 90, 110, 90])
    >>> assuncao_rate(e, b)[:4]
    array([ 1.03843594, -0.04099089, -0.56250375, -1.73061861])
    """
    raw_rate = e * 1.0 / b
    total_e, total_b = sum(e), sum(b)
    # EB prior mean: overall crude rate
    prior_mean = total_e * 1.0 / total_b
    # population-weighted variance of raw rates about the prior mean
    s2 = sum(b * ((raw_rate - prior_mean) ** 2)) / total_b
    prior_var = s2 - prior_mean / (float(total_b) / len(e))
    eb_var = prior_var + prior_mean / b
    return (raw_rate - prior_mean) / np.sqrt(eb_var)
the standardization are those of Empirical Bayes rate estimates
The standardized rates resulting from this function are used to compute
Moran's I corrected for rate variables [Choynowski1959]_ .
Parameters
----------
e : array(n, 1)
event variable measured at n spatial units
b : array(n, 1)
population at risk variable measured at n spatial units
Notes
-----
e and b are arranged in the same order
Returns
-------
: array (nx1)
Examples
--------
Creating an array of an event variable (e.g., the number of cancer patients)
for 8 regions.
>>> e = np.array([30, 25, 25, 15, 33, 21, 30, 20])
Creating another array of a population-at-risk variable (e.g., total population)
for the same 8 regions.
The order for entering values is the same as the case of e.
>>> b = np.array([100, 100, 110, 90, 100, 90, 110, 90])
Computing the rates
>>> assuncao_rate(e, b)[:4]
array([ 1.03843594, -0.04099089, -0.56250375, -1.73061861]) | train | https://github.com/pysal/esda/blob/2fafc6ec505e153152a86601d3e0fba080610c20/esda/smoothing.py#L516-L564 | null | from __future__ import division
"""
Apply smoothing to rate computation
This module provides a collection of rate smoothers (excess risk, Empirical
Bayes, spatial rate, kernel, disk, median, headbanging, and spatial
filtering) together with utilities for age standardization of rates.
Author(s):
Myunghwa Hwang mhwang4@gmail.com
David Folch dfolch@asu.edu
Luc Anselin luc.anselin@asu.edu
Serge Rey srey@asu.edu
"""
__author__ = "Myunghwa Hwang <mhwang4@gmail.com>, David Folch <dfolch@asu.edu>, Luc Anselin <luc.anselin@asu.edu>, Serge Rey <srey@asu.edu"
from libpysal.weights.weights import W
from libpysal.weights.distance import Kernel
from libpysal.weights.util import get_points_array, comb
from libpysal.cg import Point, Ray, LineSegment
from libpysal.cg import get_angle_between, get_points_dist, get_segment_point_dist,\
get_point_at_angle_and_dist, convex_hull, get_bounding_box
from libpysal.common import np, KDTree, requires as _requires
from libpysal.weights.spatial_lag import lag_spatial as slag
from scipy.stats import gamma, norm, chi2, poisson
from functools import reduce
import doctest
__all__ = ['Excess_Risk', 'Empirical_Bayes', 'Spatial_Empirical_Bayes', 'Spatial_Rate', 'Kernel_Smoother', 'Age_Adjusted_Smoother', 'Disk_Smoother', 'Spatial_Median_Rate', 'Spatial_Filtering', 'Headbanging_Triples', 'Headbanging_Median_Rate', 'flatten', 'weighted_median', 'sum_by_n', 'crude_age_standardization', 'direct_age_standardization', 'indirect_age_standardization', 'standardized_mortality_ratio', 'choynowski', 'assuncao_rate']
def flatten(l, unique=True):
    """flatten a list of lists

    Parameters
    ----------
    l : list
        of lists
    unique : boolean
             whether or not only unique items are wanted (default=True)

    Returns
    -------
    list
        of single items

    Examples
    --------
    Creating a sample list whose elements are lists of integers

    >>> l = [[1, 2], [3, 4, ], [5, 6]]

    Applying flatten function

    >>> flatten(l)
    [1, 2, 3, 4, 5, 6]
    """
    # An empty outer list used to raise TypeError (reduce() of an empty
    # sequence with no initial value); return an empty list instead.
    if not l:
        return []
    l = reduce(lambda x, y: x + y, l)
    if not unique:
        return list(l)
    return list(set(l))
def weighted_median(d, w):
    """A utility function to find a median of d based on w

    Parameters
    ----------
    d : array
        (n, 1), variable for which median will be found
    w : array
        (n, 1), variable on which d's median will be decided

    Notes
    -----
    d and w are arranged in the same order

    Returns
    -------
    float
        median of d

    Examples
    --------
    >>> d = np.array([5,4,3,1,2])
    >>> w = np.array([10, 22, 9, 2, 5])
    >>> weighted_median(d, w)
    4
    """
    # order both arrays by the values of d, then walk the cumulative weight
    sort_idx = np.argsort(d)
    sorted_d = d[sort_idx]
    cum_w = w[sort_idx].cumsum()
    half = cum_w[-1] * 1.0 / 2
    # first position where the cumulative weight reaches half the total
    k = int((cum_w >= half).argmax())
    if cum_w[k] == half and k < len(d) - 1:
        # cumulative weight splits exactly at k: average the two middle values
        return sorted_d[k:k + 2].mean()
    return sorted_d[k]
def sum_by_n(d, w, n):
    """A utility function to summarize a data array into n values
    after weighting the array with another weight array w

    Parameters
    ----------
    d : array
        (t, 1), numerical values
    w : array
        (t, 1), numerical values for weighting
    n : integer
        the number of groups
        t = c*n (c is a constant)

    Returns
    -------
    : array
      (n, 1), an array with summarized values

    Examples
    --------
    >>> d = np.array([10, 9, 20, 30])
    >>> w = np.array([0.5, 0.1, 0.3, 0.8])
    >>> sum_by_n(d, w, 2)
    array([ 5.9, 30. ])
    """
    t = len(d)
    h = t // n  # group size; t must be an exact multiple of n
    weighted = d * w
    return np.array([sum(weighted[start:start + h])
                     for start in range(0, t, h)])
def crude_age_standardization(e, b, n):
    """A utility function to compute rate through crude age standardization

    Parameters
    ----------
    e : array
        (n*h, 1), event variable measured for each age group across n
        spatial units
    b : array
        (n*h, 1), population at risk variable measured for each age group
        across n spatial units
    n : integer
        the number of spatial units

    Notes
    -----
    e and b are arranged in the same order

    Returns
    -------
    : array
      (n, 1), age standardized rate

    Examples
    --------
    >>> e = np.array([30, 25, 25, 15, 33, 21, 30, 20])
    >>> b = np.array([100, 100, 110, 90, 100, 90, 110, 90])
    >>> crude_age_standardization(e, b, 2)
    array([0.2375    , 0.26666667])
    """
    t = len(e)
    h = t // n  # number of age groups per spatial unit
    rates = e * 1.0 / b
    # total population at risk within each spatial unit
    unit_totals = np.array([sum((b * 1.0)[i:i + h]) for i in range(0, t, h)])
    # each age group's share of its own unit's population
    age_weight = b * 1.0 / unit_totals.repeat(h)
    weighted_rates = rates * age_weight
    return np.array([sum(weighted_rates[i:i + h]) for i in range(0, t, h)])
def direct_age_standardization(e, b, s, n, alpha=0.05):
    """A utility function to compute rate through direct age standardization

    Parameters
    ----------
    e : array
        (n*h, 1), event variable measured for each age group across n
        spatial units
    b : array
        (n*h, 1), population at risk variable measured for each age group
        across n spatial units
    s : array
        (n*h, 1), standard population for each age group across n spatial
        units
    n : integer
        the number of spatial units
    alpha : float
            significance level for confidence interval

    Notes
    -----
    e, b, and s are arranged in the same order

    Returns
    -------
    list
        a list of n tuples; a tuple has a rate and its lower and upper limits
        age standardized rates and confidence intervals

    Examples
    --------
    >>> e = np.array([30, 25, 25, 15, 33, 21, 30, 20])
    >>> b = np.array([1000, 1000, 1100, 900, 1000, 900, 1100, 900])
    >>> s = np.array([1000, 900, 1000, 900, 1000, 900, 1000, 900])
    >>> a, b = [i[0] for i in direct_age_standardization(e, b, s, 2)]
    >>> round(a, 4)
    0.0237
    >>> round(b, 4)
    0.0267
    """
    # standard-population share of each age group, scaled by 1/b so that
    # sum_by_n(e, age_weight, n) yields the directly standardized rate
    age_weight = (1.0 / b) * (s * 1.0 / sum_by_n(s, 1.0, n).repeat(len(s) // n))
    adjusted_r = sum_by_n(e, age_weight, n)
    var_estimate = sum_by_n(e, np.square(age_weight), n)
    # gamma-approximation parameters (Fay & Feuer style) for the CI;
    # note g_a/g_b are only used when adjusted_r[i] > 0
    g_a = np.square(adjusted_r) / var_estimate
    g_b = var_estimate / adjusted_r
    k = [age_weight[i:i + len(b) // n].max() for i in range(0, len(b),
                                                           len(b) // n)]
    g_a_k = np.square(adjusted_r + k) / (var_estimate + np.square(k))
    g_b_k = (var_estimate + np.square(k)) / (adjusted_r + k)
    res = []
    for i in range(len(adjusted_r)):
        if adjusted_r[i] == 0:
            # BUG FIX: chi2.ppf requires a degrees-of-freedom argument and
            # previously raised TypeError here. With zero observed events the
            # exact Poisson upper bound uses a chi-square with 2 degrees of
            # freedom (df = 2*(x+1) with x = 0) — TODO confirm against the
            # intended reference for the weighted case.
            upper = 0.5 * chi2.ppf(1 - 0.5 * alpha, 2)
            lower = 0.0
        else:
            lower = gamma.ppf(0.5 * alpha, g_a[i], scale=g_b[i])
            upper = gamma.ppf(1 - 0.5 * alpha, g_a_k[i], scale=g_b_k[i])
        res.append((adjusted_r[i], lower, upper))
    return res
def indirect_age_standardization(e, b, s_e, s_b, n, alpha=0.05):
    """A utility function to compute rate through indirect age standardization

    The unit-level standardized mortality ratios (SMRs) are scaled by the
    overall crude rate of the standard population; confidence limits come
    from a normal approximation on the log-SMR scale.

    Parameters
    ----------
    e : array
        (n*h, 1), event variable measured for each age group across n
        spatial units
    b : array
        (n*h, 1), population at risk variable measured for each age group
        across n spatial units
    s_e : array
        (n*h, 1), event variable measured for each age group across n
        spatial units in a standard population
    s_b : array
        (n*h, 1), population variable measured for each age group across n
        spatial units in a standard population
    n : integer
        the number of spatial units
    alpha : float
            significance level for confidence interval

    Notes
    -----
    e, b, s_e, and s_b are arranged in the same order

    Returns
    -------
    list
        a list of n tuples; a tuple has a rate and its lower and upper limits

    Examples
    --------
    >>> e = np.array([30, 25, 25, 15, 33, 21, 30, 20])
    >>> b = np.array([100, 100, 110, 90, 100, 90, 110, 90])
    >>> s_e = np.array([100, 45, 120, 100, 50, 30, 200, 80])
    >>> s_b = np.array([1000, 900, 1000, 900, 1000, 900, 1000, 900])
    >>> [i[0] for i in indirect_age_standardization(e, b, s_e, s_b, 2)]
    [0.23723821989528798, 0.2610803324099723]
    """
    smr = standardized_mortality_ratio(e, b, s_e, s_b, n)
    # overall crude rate of the standard population
    overall_rate = sum(s_e * 1.0) / sum(s_b * 1.0)
    adjusted = overall_rate * smr
    events_per_unit = sum_by_n(e, 1.0, n)
    # normal-approximation confidence interval on the log-SMR scale
    log_smr = np.log(smr)
    se_log = 1.0 / np.sqrt(events_per_unit)
    z = norm.ppf(1 - 0.5 * alpha)
    lower = np.exp(log_smr - z * se_log) * overall_rate
    upper = np.exp(log_smr + z * se_log) * overall_rate
    return list(zip(adjusted, lower, upper))
def standardized_mortality_ratio(e, b, s_e, s_b, n):
    """A utility function to compute standardized mortality ratio (SMR).

    Parameters
    ----------
    e : array
        (n*h, 1), event variable measured for each age group across n spatial units
    b : array
        (n*h, 1), population at risk variable measured for each age group across n spatial units
    s_e : array
        (n*h, 1), event variable measured for each age group across n spatial units in a standard population
    s_b : array
        (n*h, 1), population variable measured for each age group across n spatial units in a standard population
    n : integer
        the number of spatial units

    Notes
    -----
    e, b, s_e, and s_b are arranged in the same order

    Returns
    -------
    array
        (nx1)

    Examples
    --------
    Event and population-at-risk values for 2 regions with 4 age groups each
    (first 4 values belong to region 1, the next 4 to region 2).

    >>> e = np.array([30, 25, 25, 15, 33, 21, 30, 20])
    >>> b = np.array([100, 100, 110, 90, 100, 90, 110, 90])

    Standard event and population arrays used as the benchmark.

    >>> s_e = np.array([100, 45, 120, 100, 50, 30, 200, 80])
    >>> s_b = np.array([1000, 900, 1000, 900, 1000, 900, 1000, 900])

    Specifying the number of regions.

    >>> n = 2

    Computing the SMR for each region.

    >>> a, b = standardized_mortality_ratio(e, b, s_e, s_b, n)
    >>> round(a, 4)
    2.4869
    >>> round(b, 4)
    2.7368
    """
    # age-specific rates in the standard population
    std_rates = s_e * 1.0 / s_b
    # observed events per spatial unit
    observed = sum_by_n(e, 1.0, n)
    # events expected if the standard rates applied to each unit's population
    expected = sum_by_n(b, std_rates, n)
    return observed * 1.0 / expected
def choynowski(e, b, n, threshold=None):
    """Choynowski map probabilities [Choynowski1959]_ .

    Parameters
    ----------
    e : array(n*h, 1)
        event variable measured for each age group across n spatial units
    b : array(n*h, 1)
        population at risk variable measured for each age group across n spatial units
    n : integer
        the number of spatial units
    threshold : float
        Returns zero for any p-value greater than threshold

    Notes
    -----
    e and b are arranged in the same order

    Returns
    -------
    : array (nx1)

    Examples
    --------
    Event and population-at-risk values for 2 regions with 4 age groups each
    (first 4 values belong to region 1, the next 4 to region 2).

    >>> e = np.array([30, 25, 25, 15, 33, 21, 30, 20])
    >>> b = np.array([100, 100, 110, 90, 100, 90, 110, 90])

    Specifying the number of regions.

    >>> n = 2

    Computing the Choynowski probabilities.

    >>> a,b = choynowski(e, b, n)
    >>> round(a, 3)
    0.304
    >>> round(b, 3)
    0.294
    """
    events = sum_by_n(e, 1.0, n)
    pops = sum_by_n(b, 1.0, n)
    # overall rate across all units gives the expected count per unit
    overall = sum(events) * 1.0 / sum(pops)
    expected = overall * pops
    probs = []
    for obs, exp in zip(events, expected):
        # lower Poisson tail when the observed count is at or below the
        # expectation, upper tail otherwise
        if obs <= exp:
            probs.append(poisson.cdf(obs, exp))
        else:
            probs.append(1 - poisson.cdf(obs - 1, exp))
    if threshold:
        probs = [pv if pv < threshold else 0.0 for pv in probs]
    return np.array(probs)
class _Smoother(object):
    """
    Helper base class implementing behavior shared by all smoothers.

    Right now the only thing propagated is the ``by_col`` function.
    Most of these smoothers could arguably be plain functions (aside from
    maybe headbanging triples), since they are literally only an ``__init__``
    plus one attribute.
    """

    def __init__(self):
        pass

    @classmethod
    def by_col(cls, df, e, b, inplace=False, **kwargs):
        """
        Compute smoothing by columns in a dataframe.

        Parameters
        -----------
        df : pandas.DataFrame
            a dataframe containing the data to be smoothed
        e : string or list of strings
            the name or names of columns containing event variables to be
            smoothed
        b : string or list of strings
            the name or names of columns containing the population
            variables to be smoothed
        inplace : bool
            a flag denoting whether to output a copy of `df` with the
            relevant smoothed columns appended, or to append the columns
            directly to `df` itself.
        **kwargs: optional keyword arguments
            optional keyword options that are passed directly to the
            smoother.

        Returns
        ---------
        a copy of `df` containing the columns. Or, if `inplace`, this returns
        None, but implicitly adds columns to `df`.
        """
        if not inplace:
            # work on a copy and recurse with inplace=True
            out = df.copy()
            cls.by_col(out, e, b, inplace=True, **kwargs)
            return out
        e = [e] if isinstance(e, str) else e
        b = [b] if isinstance(b, str) else b
        # broadcast a single population column across many event columns
        if len(b) == 1 and len(e) > 1:
            b = b * len(e)
        if len(e) != len(b):
            raise ValueError('There is no one-to-one mapping between event'
                             ' variable and population at risk variable!')
        for ename, bname in zip(e, b):
            outcol = '_'.join(('-'.join((ename, bname)), cls.__name__.lower()))
            df[outcol] = cls(df[ename], df[bname], **kwargs).r
class Excess_Risk(_Smoother):
    """Excess Risk

    Parameters
    ----------
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units

    Attributes
    ----------
    r : array (n, 1)
        excess risk values

    Examples
    --------
    Reading data in stl_hom.csv into stl to extract values
    for event and population-at-risk variables

    >>> import libpysal
    >>> stl = libpysal.io.open(libpysal.examples.get_path('stl_hom.csv'), 'r')

    The 11th and 14th columns in stl_hom.csv include the number of homicides
    and the population. Creating two arrays from these columns.

    >>> stl_e, stl_b = np.array(stl[:,10]), np.array(stl[:,13])

    Creating an instance of Excess_Risk class using stl_e and stl_b

    >>> er = Excess_Risk(stl_e, stl_b)

    Extracting the excess risk values through the property r of the Excess_Risk instance, er

    >>> er.r[:10]
    array([[0.20665681],
           [0.43613787],
           [0.42078261],
           [0.22066928],
           [0.57981596],
           [0.35301709],
           [0.56407549],
           [0.17020994],
           [0.3052372 ],
           [0.25821905]])
    """

    def __init__(self, e, b):
        events = np.asarray(e).reshape(-1, 1)
        pop = np.asarray(b).reshape(-1, 1)
        # the overall rate across all units serves as the expected rate
        overall = events.sum() * 1.0 / pop.sum()
        # ratio of each unit's observed rate to the overall rate
        self.r = events * 1.0 / (pop * overall)
class Empirical_Bayes(_Smoother):
    """Aspatial Empirical Bayes Smoothing

    Parameters
    ----------
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units

    Attributes
    ----------
    r : array (n, 1)
        rate values from Empirical Bayes Smoothing

    Examples
    --------
    Reading data in stl_hom.csv into stl to extract values
    for event and population-at-risk variables

    >>> import libpysal
    >>> stl = libpysal.io.open(libpysal.examples.get_path('stl_hom.csv'), 'r')

    The 11th and 14th columns in stl_hom.csv include the number of homicides
    and the population. Creating two arrays from these columns.

    >>> stl_e, stl_b = np.array(stl[:,10]), np.array(stl[:,13])

    Creating an instance of Empirical_Bayes class using stl_e and stl_b

    >>> eb = Empirical_Bayes(stl_e, stl_b)

    Extracting the risk values through the property r of the Empirical_Bayes instance, eb

    >>> eb.r[:10]
    array([[2.36718950e-05],
           [4.54539167e-05],
           [4.78114019e-05],
           [2.76907146e-05],
           [6.58989323e-05],
           [3.66494122e-05],
           [5.79952721e-05],
           [2.03064590e-05],
           [3.31152999e-05],
           [3.02748380e-05]])
    """

    def __init__(self, e, b):
        events = np.asarray(e).reshape(-1, 1)
        pop = np.asarray(b).reshape(-1, 1)
        total_events = events.sum() * 1.0
        total_pop = pop.sum() * 1.0
        # the overall rate is the prior mean
        prior_mean = total_events / total_pop
        raw_rate = events * 1.0 / pop
        deviation = raw_rate - prior_mean
        # method-of-moments estimate of the prior variance
        prior_var = ((pop * deviation * deviation).sum() * 1.0 / total_pop
                     - prior_mean * 1.0 / pop.mean())
        # shrink raw rates toward the prior mean; units with larger
        # populations keep more of their observed rate
        shrinkage = prior_var / (prior_var + prior_mean / pop)
        self.r = shrinkage * raw_rate + (1.0 - shrinkage) * prior_mean
class _Spatial_Smoother(_Smoother):
    """
    Helper base class implementing behavior shared by all spatial smoothers,
    i.e. smoothers that require a spatial weights object.

    Right now the only thing propagated is the ``by_col`` function, which
    additionally resolves and validates the spatial weights argument.
    """

    def __init__(self):
        pass

    @classmethod
    def by_col(cls, df, e, b, w=None, inplace=False, **kwargs):
        """
        Compute smoothing by columns in a dataframe.

        Parameters
        -----------
        df : pandas.DataFrame
            a dataframe containing the data to be smoothed
        e : string or list of strings
            the name or names of columns containing event variables to be
            smoothed
        b : string or list of strings
            the name or names of columns containing the population
            variables to be smoothed
        w : pysal.weights.W or list of pysal.weights.W
            the spatial weights object or objects to use with the
            event-population pairs. If not provided and a weights object
            is in the dataframe's metadata, that weights object will be
            used.
        inplace : bool
            a flag denoting whether to output a copy of `df` with the
            relevant smoothed columns appended, or to append the columns
            directly to `df` itself.
        **kwargs: optional keyword arguments
            optional keyword options that are passed directly to the
            smoother.

        Returns
        ---------
        a copy of `df` containing the columns. Or, if `inplace`, this returns
        None, but implicitly adds columns to `df`.

        Raises
        ------
        Exception
            if no weights object is supplied and none is attached to `df`
        ValueError
            if the event and population column lists differ in length
        """
        if not inplace:
            new = df.copy()
            cls.by_col(new, e, b, w=w, inplace=True, **kwargs)
            return new
        if isinstance(e, str):
            e = [e]
        if isinstance(b, str):
            b = [b]
        if w is None:
            found = False
            for k in df._metadata:
                # BUG FIX: the original did ``df.__dict__.get(w, None)``,
                # i.e. always looked up key ``None`` and never found an
                # attached weights object. Look up the metadata attribute
                # name instead and stop at the first weights object found.
                candidate = df.__dict__.get(k, None)
                if isinstance(candidate, W):
                    w = candidate
                    found = True
                    break
            if not found:
                raise Exception('Weights not provided and no weights attached to frame!'
                                ' Please provide a weight or attach a weight to the'
                                ' dataframe')
        if isinstance(w, W):
            # a single weights object applies to every event/population pair
            w = [w] * len(e)
        if len(b) == 1 and len(e) > 1:
            b = b * len(e)
        if len(e) != len(b):
            raise ValueError('There is no one-to-one mapping between event'
                             ' variable and population at risk variable!')
        for ename, bname, wi in zip(e, b, w):
            outcol = '_'.join(('-'.join((ename, bname)), cls.__name__.lower()))
            df[outcol] = cls(df[ename], df[bname], w=wi, **kwargs).r
class Spatial_Empirical_Bayes(_Spatial_Smoother):
    """Spatial Empirical Bayes Smoothing

    Parameters
    ----------
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units
    w : spatial weights instance

    Attributes
    ----------
    r : array (n, 1)
        rate values from Empirical Bayes Smoothing

    Raises
    ------
    ValueError
        if the id_order of `w` has not been set

    Examples
    --------
    Reading data in stl_hom.csv into stl to extract values
    for event and population-at-risk variables

    >>> import libpysal
    >>> stl = libpysal.io.open(libpysal.examples.get_path('stl_hom.csv'), 'r')

    The 11th and 14th columns in stl_hom.csv include the number of homicides
    and the population. Creating two arrays from these columns.

    >>> stl_e, stl_b = np.array(stl[:,10]), np.array(stl[:,13])

    Creating a spatial weights instance by reading in stl.gal file.

    >>> stl_w = libpysal.io.open(libpysal.examples.get_path('stl.gal'), 'r').read()

    Ensuring that the elements in the spatial weights instance are ordered
    by the given sequential numbers from 1 to the number of observations in stl_hom.csv

    >>> if not stl_w.id_order_set: stl_w.id_order = range(1,len(stl) + 1)

    Creating an instance of Spatial_Empirical_Bayes class using stl_e, stl_b, and stl_w

    >>> from esda.smoothing import Spatial_Empirical_Bayes
    >>> s_eb = Spatial_Empirical_Bayes(stl_e, stl_b, stl_w)

    Extracting the risk values through the property r of s_eb

    >>> s_eb.r[:10]
    array([[4.01485749e-05],
           [3.62437513e-05],
           [4.93034844e-05],
           [5.09387329e-05],
           [3.72735210e-05],
           [3.69333797e-05],
           [5.40245456e-05],
           [2.99806055e-05],
           [3.73034109e-05],
           [3.47270722e-05]])
    """

    def __init__(self, e, b, w):
        if not w.id_order_set:
            # FIX: corrected typo in the error message ("e an b")
            raise ValueError("w id_order must be set to align with the order of e and b")
        e = np.asarray(e).reshape(-1, 1)
        b = np.asarray(b).reshape(-1, 1)
        # local prior: spatially smoothed rate over each unit's neighborhood
        r_mean = Spatial_Rate(e, b, w).r
        rate = e * 1.0 / b
        r_var_left = np.ones_like(e) * 1.
        ngh_num = np.ones_like(e)
        # population of each unit's neighborhood (neighbors plus itself)
        bi = slag(w, b) + b
        for i, idv in enumerate(w.id_order):
            ngh = list(w[idv].keys()) + [idv]
            nghi = [w.id2i[k] for k in ngh]
            ngh_num[i] = len(nghi)
            # population-weighted squared deviation of neighborhood rates
            # from the local prior mean
            v = sum(np.square(rate[nghi] - r_mean[i]) * b[nghi])
            r_var_left[i] = v
        r_var_left = r_var_left / bi
        r_var_right = r_mean / (bi / ngh_num)
        r_var = r_var_left - r_var_right
        # variance estimates can go negative; clamp at zero so the
        # shrinkage factor stays within [0, 1]
        r_var[r_var < 0] = 0.0
        self.r = r_mean + (rate - r_mean) * (r_var / (r_var + (r_mean / b)))
class Spatial_Rate(_Spatial_Smoother):
    """Spatial Rate Smoothing

    Parameters
    ----------
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units
    w : spatial weights instance

    Attributes
    ----------
    r : array (n, 1)
        rate values from spatial rate smoothing

    Examples
    --------
    Reading data in stl_hom.csv into stl to extract values
    for event and population-at-risk variables

    >>> import libpysal
    >>> stl = libpysal.io.open(libpysal.examples.get_path('stl_hom.csv'), 'r')

    The 11th and 14th columns in stl_hom.csv include the number of homicides
    and the population. Creating two arrays from these columns.

    >>> stl_e, stl_b = np.array(stl[:,10]), np.array(stl[:,13])

    Creating a spatial weights instance by reading in stl.gal file.

    >>> stl_w = libpysal.io.open(libpysal.examples.get_path('stl.gal'), 'r').read()

    Ensuring that the elements in the spatial weights instance are ordered
    by the given sequential numbers from 1 to the number of observations in stl_hom.csv

    >>> if not stl_w.id_order_set: stl_w.id_order = range(1,len(stl) + 1)

    Creating an instance of Spatial_Rate class using stl_e, stl_b, and stl_w

    >>> from esda.smoothing import Spatial_Rate
    >>> sr = Spatial_Rate(stl_e,stl_b,stl_w)

    Extracting the risk values through the property r of sr

    >>> sr.r[:10]
    array([[4.59326407e-05],
           [3.62437513e-05],
           [4.98677081e-05],
           [5.09387329e-05],
           [3.72735210e-05],
           [4.01073093e-05],
           [3.79372794e-05],
           [3.27019246e-05],
           [4.26204928e-05],
           [3.47270722e-05]])
    """

    def __init__(self, e, b, w):
        if not w.id_order_set:
            raise ValueError("w id_order must be set to align with the order of e and b")
        events = np.asarray(e).reshape(-1, 1)
        pop = np.asarray(b).reshape(-1, 1)
        # binary weights so the spatial lag is a plain neighborhood sum
        w.transform = 'b'
        # neighborhood rate: (own + neighbors' events) / (own + neighbors' population)
        self.r = (events + slag(w, events)) / (pop + slag(w, pop))
        # restore the original transform
        w.transform = 'o'
class Kernel_Smoother(_Spatial_Smoother):
    """Kernel smoothing

    Parameters
    ----------
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units
    w : Kernel weights instance

    Attributes
    ----------
    r : array (n, 1)
        rate values from spatial rate smoothing

    Raises
    ------
    ValueError
        if `w` is not a Kernel weights instance, or if its id_order is unset

    Examples
    --------
    Creating an array including event values for 6 regions

    >>> e = np.array([10, 1, 3, 4, 2, 5])

    Creating another array including population-at-risk values for the 6 regions

    >>> b = np.array([100, 15, 20, 20, 80, 90])

    Creating a list containing geographic coordinates of the 6 regions' centroids

    >>> points=[(10, 10), (20, 10), (40, 10), (15, 20), (30, 20), (30, 30)]

    Creating a kernel-based spatial weights instance by using the above points

    >>> kw=Kernel(points)

    Ensuring that the elements in the kernel-based weights are ordered
    by the given sequential numbers from 0 to 5

    >>> if not kw.id_order_set: kw.id_order = range(0,len(points))

    Applying kernel smoothing to e and b

    >>> kr = Kernel_Smoother(e, b, kw)

    Extracting the smoothed rates through the property r of the Kernel_Smoother instance

    >>> kr.r
    array([[0.10543301],
           [0.0858573 ],
           [0.08256196],
           [0.09884584],
           [0.04756872],
           [0.04845298]])
    """

    def __init__(self, e, b, w):
        # BUG FIX: the original raised ``Error``, a name that is not defined
        # anywhere in this module, so passing the wrong weights type produced
        # a NameError instead of a meaningful exception. ``isinstance`` also
        # replaces the non-idiomatic ``type(w) != Kernel`` check.
        if not isinstance(w, Kernel):
            raise ValueError('w must be an instance of Kernel weights')
        if not w.id_order_set:
            raise ValueError("w id_order must be set to align with the order of e and b")
        e = np.asarray(e).reshape(-1, 1)
        b = np.asarray(b).reshape(-1, 1)
        # kernel-weighted sums of events and populations
        w_e, w_b = slag(w, e), slag(w, b)
        self.r = w_e / w_b
class Age_Adjusted_Smoother(_Spatial_Smoother):
    """Age-adjusted rate smoothing

    Parameters
    ----------
    e : array (n*h, 1)
        event variable measured for each age group across n spatial units
    b : array (n*h, 1)
        population at risk variable measured for each age group across n spatial units
    w : spatial weights instance
    s : array (n*h, 1)
        standard population for each age group across n spatial units

    Attributes
    ----------
    r : array (n, 1)
        rate values from spatial rate smoothing

    Notes
    -----
    Weights used to smooth age-specific events and populations are simple binary weights

    Examples
    --------
    Creating an array including 12 values for the 6 regions with 2 age groups

    >>> e = np.array([10, 8, 1, 4, 3, 5, 4, 3, 2, 1, 5, 3])

    Creating another array including 12 population-at-risk values for the 6 regions

    >>> b = np.array([100, 90, 15, 30, 25, 20, 30, 20, 80, 80, 90, 60])

    For age adjustment, we need another array of values containing standard population
    s includes standard population data for the 6 regions

    >>> s = np.array([98, 88, 15, 29, 20, 23, 33, 25, 76, 80, 89, 66])

    Creating a list containing geographic coordinates of the 6 regions' centroids

    >>> points=[(10, 10), (20, 10), (40, 10), (15, 20), (30, 20), (30, 30)]

    Creating a kernel-based spatial weights instance by using the above points

    >>> kw=Kernel(points)

    Ensuring that the elements in the kernel-based weights are ordered
    by the given sequential numbers from 0 to 5

    >>> if not kw.id_order_set: kw.id_order = range(0,len(points))

    Applying age-adjusted smoothing to e and b

    >>> ar = Age_Adjusted_Smoother(e, b, kw, s)

    Extracting the smoothed rates through the property r of the Age_Adjusted_Smoother instance

    >>> ar.r
    array([0.10519625, 0.08494318, 0.06440072, 0.06898604, 0.06952076,
           0.05020968])
    """

    def __init__(self, e, b, w, s, alpha=0.05):
        e = np.asarray(e).reshape(-1, 1)
        b = np.asarray(b).reshape(-1, 1)
        s = np.asarray(s).flatten()
        t = len(e)
        # h: the number of age groups per spatial unit
        h = t // w.n
        w.transform = 'b'
        e_n, b_n = [], []
        # spatially lag each age group separately (every h-th element
        # belongs to the same age group)
        for i in range(h):
            e_n.append(slag(w, e[i::h]).tolist())
            b_n.append(slag(w, b[i::h]).tolist())
        # interleave the age-group lags back into unit-major order
        e_n = np.array(e_n).reshape((1, t), order='F')[0]
        b_n = np.array(b_n).reshape((1, t), order='F')[0]
        e_n = e_n.reshape(s.shape)
        b_n = b_n.reshape(s.shape)
        r = direct_age_standardization(e_n, b_n, s, w.n, alpha=alpha)
        # keep only the point estimates, dropping the confidence bounds
        self.r = np.array([i[0] for i in r])
        # restore the original transform
        w.transform = 'o'

    @_requires('pandas')
    @classmethod
    def by_col(cls, df, e, b, w=None, s=None, **kwargs):
        """
        Compute smoothing by columns in a dataframe.

        Parameters
        -----------
        df : pandas.DataFrame
            a dataframe containing the data to be smoothed
        e : string or list of strings
            the name or names of columns containing event variables to be
            smoothed
        b : string or list of strings
            the name or names of columns containing the population
            variables to be smoothed
        w : pysal.weights.W or list of pysal.weights.W
            the spatial weights object or objects to use with the
            event-population pairs. If not provided and a weights object
            is in the dataframe's metadata, that weights object will be
            used.
        s : string or list of strings
            the name or names of columns to use as a standard population
            variable for the events `e` and at-risk populations `b`.
        **kwargs: optional keyword arguments
            optional keyword options that are passed directly to the
            smoother.

        Returns
        ---------
        a new dataframe containing the smoothed columns.

        Raises
        ------
        Exception
            if `s` is not supplied, or no weights object can be resolved
        ValueError
            if the column lists and weights do not map one-to-one
        """
        if s is None:
            raise Exception('Standard population variable "s" must be supplied.')
        import pandas as pd
        if isinstance(e, str):
            e = [e]
        if isinstance(b, str):
            b = [b]
        if isinstance(s, str):
            s = [s]
        if w is None:
            found = False
            for k in df._metadata:
                # BUG FIX: the original did ``df.__dict__.get(w, None)``,
                # i.e. always looked up key ``None`` and never found an
                # attached weights object. Look up the metadata name instead.
                candidate = df.__dict__.get(k, None)
                if isinstance(candidate, W):
                    w = candidate
                    found = True
                    break
            if not found:
                raise Exception('Weights not provided and no weights attached to frame!'
                                ' Please provide a weight or attach a weight to the'
                                ' dataframe.')
        if isinstance(w, W):
            w = [w] * len(e)
        if not all(isinstance(wi, W) for wi in w):
            raise Exception('Weights object must be an instance of '
                            ' libpysal.weights.W!')
        b = b * len(e) if len(b) == 1 and len(e) > 1 else b
        s = s * len(e) if len(s) == 1 and len(e) > 1 else s
        if not (len(e) == len(b) == len(s) == len(w)):
            raise ValueError('There is no one-to-one mapping between event'
                             ' variable and population at risk variable, and '
                             ' standard population variable, and spatial '
                             ' weights!')
        rdf = []
        max_len = 0
        for ei, bi, wi, si in zip(e, b, w, s):
            outcol = '_'.join(('-'.join((ei, bi)), cls.__name__.lower()))
            this_r = cls(df[ei], df[bi], w=wi, s=df[si], **kwargs).r
            # BUG FIX: track the longest result; the original reset
            # max_len to 0 whenever a longer result appeared, so no
            # column was ever padded correctly.
            max_len = max(max_len, len(this_r))
            rdf.append((outcol, this_r.tolist()))
        # BUG FIX: pad each column up to max_len; the original appended
        # max_len extra Nones to every column, producing ragged lengths.
        padded = (col + [None] * (max_len - len(col)) for _, col in rdf)
        rdf = list(zip((name for name, _ in rdf), padded))
        # pd.DataFrame.from_items was removed from pandas; a dict of
        # name -> column preserves insertion order on Python 3.7+.
        rdf = pd.DataFrame(dict(rdf))
        return rdf
class Disk_Smoother(_Spatial_Smoother):
    """Locally weighted averages or disk smoothing

    Parameters
    ----------
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units
    w : spatial weights matrix

    Attributes
    ----------
    r : array (n, 1)
        rate values from disk smoothing

    Examples
    --------
    Reading data in stl_hom.csv into stl to extract values
    for event and population-at-risk variables

    >>> import libpysal
    >>> stl = libpysal.io.open(libpysal.examples.get_path('stl_hom.csv'), 'r')

    The 11th and 14th columns in stl_hom.csv include the number of homicides
    and the population. Creating two arrays from these columns.

    >>> stl_e, stl_b = np.array(stl[:,10]), np.array(stl[:,13])

    Creating a spatial weights instance by reading in stl.gal file.

    >>> stl_w = libpysal.io.open(libpysal.examples.get_path('stl.gal'), 'r').read()

    Ensuring that the elements in the spatial weights instance are ordered
    by the given sequential numbers from 1 to the number of observations in stl_hom.csv

    >>> if not stl_w.id_order_set: stl_w.id_order = range(1,len(stl) + 1)

    Applying disk smoothing to stl_e and stl_b

    >>> sr = Disk_Smoother(stl_e,stl_b,stl_w)

    Extracting the risk values through the property r of s_eb

    >>> sr.r[:10]
    array([[4.56502262e-05],
           [3.44027685e-05],
           [3.38280487e-05],
           [4.78530468e-05],
           [3.12278573e-05],
           [2.22596997e-05],
           [2.67074856e-05],
           [2.36924573e-05],
           [3.48801587e-05],
           [3.09511832e-05]])
    """

    def __init__(self, e, b, w):
        if not w.id_order_set:
            raise ValueError("w id_order must be set to align with the order of e and b")
        events = np.asarray(e).reshape(-1, 1)
        pop = np.asarray(b).reshape(-1, 1)
        rates = events * 1.0 / pop
        # total weight of each unit's neighborhood, used to turn the
        # lagged rate sums into averages
        totals = np.array([sum(w.weights[idx])
                           for idx in w.id_order]).reshape(-1, 1)
        self.r = slag(w, rates) / totals
class Spatial_Median_Rate(_Spatial_Smoother):
    """Spatial Median Rate Smoothing

    Parameters
    ----------
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units
    w : spatial weights instance
    aw : array (n, 1)
        auxiliary weight variable measured across n spatial units
    iteration : integer
        the number of iterations

    Attributes
    ----------
    r : array (n, 1)
        rate values from spatial median rate smoothing
    w : spatial weights instance
    aw : array (n, 1)
        auxiliary weight variable measured across n spatial units

    Examples
    --------
    Reading data in stl_hom.csv into stl to extract values
    for event and population-at-risk variables

    >>> import libpysal
    >>> stl = libpysal.io.open(libpysal.examples.get_path('stl_hom.csv'), 'r')

    The 11th and 14th columns in stl_hom.csv include the number of homicides
    and the population. Creating two arrays from these columns.

    >>> stl_e, stl_b = np.array(stl[:,10]), np.array(stl[:,13])

    Creating a spatial weights instance by reading in stl.gal file.

    >>> stl_w = libpysal.io.open(libpysal.examples.get_path('stl.gal'), 'r').read()

    Ensuring that the elements in the spatial weights instance are ordered
    by the given sequential numbers from 1 to the number of observations in stl_hom.csv

    >>> if not stl_w.id_order_set: stl_w.id_order = range(1,len(stl) + 1)

    Computing spatial median rates without iteration

    >>> smr0 = Spatial_Median_Rate(stl_e,stl_b,stl_w)
    >>> smr0.r[:10]
    array([3.96047383e-05, 3.55386859e-05, 3.28308921e-05, 4.30731238e-05,
           3.12453969e-05, 1.97300409e-05, 3.10159267e-05, 2.19279204e-05,
           2.93763432e-05, 2.93763432e-05])

    Recomputing spatial median rates with 5 iterations

    >>> smr1 = Spatial_Median_Rate(stl_e,stl_b,stl_w,iteration=5)
    >>> smr1.r[:10]
    array([3.11293620e-05, 2.95956330e-05, 3.11293620e-05, 3.10159267e-05,
           2.98436066e-05, 2.76406686e-05, 3.10159267e-05, 2.94788171e-05,
           2.99460806e-05, 2.96981070e-05])

    Computing spatial median rates by using the base variable as auxiliary
    weights without iteration

    >>> smr2 = Spatial_Median_Rate(stl_e,stl_b,stl_w,aw=stl_b)
    >>> smr2.r[:10]
    array([5.77412020e-05, 4.46449551e-05, 5.77412020e-05, 5.77412020e-05,
           4.46449551e-05, 3.61363528e-05, 3.61363528e-05, 4.46449551e-05,
           5.77412020e-05, 4.03987355e-05])

    Recomputing spatial median rates by using the base variable as auxiliary
    weights with 5 iterations

    >>> smr3 = Spatial_Median_Rate(stl_e,stl_b,stl_w,aw=stl_b,iteration=5)
    >>> smr3.r[:10]
    array([3.61363528e-05, 4.46449551e-05, 3.61363528e-05, 3.61363528e-05,
           4.46449551e-05, 3.61363528e-05, 3.61363528e-05, 4.46449551e-05,
           3.61363528e-05, 4.46449551e-05])
    >>>
    """

    def __init__(self, e, b, w, aw=None, iteration=1):
        if not w.id_order_set:
            raise ValueError("w id_order must be set to align with the order of e and b")
        events = np.asarray(e).flatten()
        pop = np.asarray(b).flatten()
        # start from the raw rates, then repeatedly replace each rate with
        # the (weighted) median over its neighborhood
        self.r = events * 1.0 / pop
        self.aw, self.w = aw, w
        while iteration:
            self.__search_median()
            iteration -= 1

    def __search_median(self):
        rates, aux, w = self.r, self.aw, self.w
        smoothed = []
        if aux is None:
            # plain median over each unit and its neighbors
            for pos, key in enumerate(w.id_order):
                disk = np.append(rates[pos], rates[w.neighbor_offsets[key]])
                smoothed.append(np.median(disk))
        else:
            # median weighted by the auxiliary variable
            for pos, key in enumerate(w.id_order):
                disk_idx = [pos] + list(w.neighbor_offsets[key])
                smoothed.append(weighted_median(rates[disk_idx], aux[disk_idx]))
        self.r = np.asarray(smoothed).reshape(rates.shape)
class Spatial_Filtering(_Smoother):
    """Spatial Filtering

    Parameters
    ----------
    bbox : a list of two lists where each list is a pair of coordinates
        a bounding box for the entire n spatial units
    data : array (n, 2)
        x, y coordinates
    e : array (n, 1)
        event variable measured across n spatial units
    b : array (n, 1)
        population at risk variable measured across n spatial units
    x_grid : integer
        the number of cells on x axis
    y_grid : integer
        the number of cells on y axis
    r : float
        fixed radius of a moving window
    pop : integer
        population threshold to create adaptive moving windows

    Attributes
    ----------
    grid : array (x_grid*y_grid, 2)
        x, y coordinates for grid points
    r : array (x_grid*y_grid, 1)
        rate values for grid points

    Raises
    ------
    ValueError
        if both `r` and `pop` are None

    Notes
    -----
    No tool is provided to find an optimal value for r or pop.

    Examples
    --------
    Reading data in stl_hom.csv into stl to extract values
    for event and population-at-risk variables

    >>> import libpysal
    >>> stl = libpysal.io.open(libpysal.examples.get_path('stl_hom.csv'), 'r')

    Reading the stl data in the WKT format so that
    we can easily extract polygon centroids

    >>> from libpysal.io.util.wkt import WKTParser
    >>> fromWKT = WKTParser()
    >>> stl.cast('WKT',fromWKT)

    Extracting polygon centroids through iteration

    >>> d = np.array([i.centroid for i in stl[:,0]])

    Specifying the bounding box for the stl_hom data.
    The bbox should include two points for the left-bottom and the right-top corners

    >>> bbox = [[-92.700676, 36.881809], [-87.916573, 40.3295669]]

    The 11th and 14th columns in stl_hom.csv include the number of homicides
    and the population. Creating two arrays from these columns.

    >>> stl_e, stl_b = np.array(stl[:,10]), np.array(stl[:,13])

    Applying spatial filtering by using a 10*10 mesh grid and a moving window
    with 2 radius

    >>> sf_0 = Spatial_Filtering(bbox,d,stl_e,stl_b,10,10,r=2)
    >>> sf_0.r[:10]
    array([4.23561763e-05, 4.45290850e-05, 4.56456221e-05, 4.49133384e-05,
           4.39671835e-05, 4.44903042e-05, 4.19845497e-05, 4.11936548e-05,
           3.93463504e-05, 4.04376345e-05])

    Applying another spatial filtering by allowing the moving window to grow
    until 600000 people are found in the window

    >>> sf = Spatial_Filtering(bbox,d,stl_e,stl_b,10,10,pop=600000)
    >>> sf.r.shape
    (100,)
    >>> sf.r[:10]
    array([3.73728738e-05, 4.04456300e-05, 4.04456300e-05, 3.81035327e-05,
           4.54831940e-05, 4.54831940e-05, 3.75658628e-05, 3.75658628e-05,
           3.75658628e-05, 3.75658628e-05])
    """

    def __init__(self, bbox, data, e, b, x_grid, y_grid, r=None, pop=None):
        e = np.asarray(e).reshape(-1, 1)
        b = np.asarray(b).reshape(-1, 1)
        data_tree = KDTree(data)
        x_range = bbox[1][0] - bbox[0][0]
        y_range = bbox[1][1] - bbox[0][1]
        # regular grid of reference points covering the bounding box
        x, y = np.mgrid[bbox[0][0]:bbox[1][0]:float(x_range) / x_grid,
                        bbox[0][1]:bbox[1][1]:float(y_range) / y_grid]
        self.grid = list(zip(x.ravel(), y.ravel()))
        self.r = []
        if r is None and pop is None:
            raise ValueError("Either r or pop should not be None")
        if r is not None:
            # fixed-radius window centred on each grid point
            pnts_in_disk = data_tree.query_ball_point(self.grid, r=r)
            for ids in pnts_in_disk:
                # FIX: use a dedicated name for the window rate; the
                # original rebound the parameter ``r`` inside the loop.
                disk_rate = e[ids].sum() * 1.0 / b[ids].sum()
                self.r.append(disk_rate)
        if pop is not None:
            # adaptive window: all points ordered by distance from each
            # grid point, accumulated until the population threshold
            nearest_pnts = data_tree.query(self.grid, k=len(e))[1]
            for ids in nearest_pnts:
                e_cum, b_cum = e[ids].cumsum(), b[ids].cumsum()
                within = b_cum <= pop
                e_f, b_f = e_cum[within], b_cum[within]
                if len(e_f) == 0:
                    # the window includes at least the single nearest point
                    e_f = e_cum[[0]]
                    b_f = b_cum[[0]]
                self.r.append(e_f[-1] * 1.0 / b_f[-1])
        self.r = np.array(self.r)

    @_requires('pandas')
    @classmethod
    def by_col(cls, df, e, b, x_grid, y_grid, geom_col='geometry', **kwargs):
        """
        Compute smoothing by columns in a dataframe. The bounding box and point
        information is computed from the geometry column.

        Parameters
        -----------
        df : pandas.DataFrame
            a dataframe containing the data to be smoothed
        e : string or list of strings
            the name or names of columns containing event variables to be
            smoothed
        b : string or list of strings
            the name or names of columns containing the population
            variables to be smoothed
        x_grid : integer
            number of grid cells to use along the x-axis
        y_grid : integer
            number of grid cells to use along the y-axis
        geom_col: string
            the name of the column in the dataframe containing the
            geometry information.
        **kwargs: optional keyword arguments
            optional keyword options that are passed directly to the
            smoother.

        Returns
        ---------
        a new dataframe of dimension (x_grid*y_grid, 3), containing the
        coordinates of the grid cells and the rates associated with those grid
        cells.
        """
        import pandas as pd
        # prep for application over multiple event/population pairs
        if isinstance(e, str):
            e = [e]
        if isinstance(b, str):
            b = [b]
        if len(e) > len(b):
            b = b * len(e)
        if isinstance(x_grid, (int, float)):
            x_grid = [x_grid] * len(e)
        if isinstance(y_grid, (int, float)):
            y_grid = [y_grid] * len(e)
        bbox = get_bounding_box(df[geom_col])
        bbox = [[bbox.left, bbox.lower], [bbox.right, bbox.upper]]
        data = get_points_array(df[geom_col])
        res = []
        for ename, bname, xgi, ygi in zip(e, b, x_grid, y_grid):
            smoothed = cls(bbox, data, df[ename], df[bname], xgi, ygi, **kwargs)
            grid = np.asarray(smoothed.grid).reshape(-1, 2)
            name = '_'.join(('-'.join((ename, bname)), cls.__name__.lower()))
            colnames = ('_'.join((name, suffix)) for suffix in ['X', 'Y', 'R'])
            items = [(colname, col) for colname, col in zip(colnames, [grid[:, 0],
                                                                       grid[:, 1],
                                                                       smoothed.r])]
            # pd.DataFrame.from_items was removed from pandas; a dict of
            # name -> column preserves insertion order on Python 3.7+.
            res.append(pd.DataFrame(dict(items)))
        outdf = pd.concat(res)
        return outdf
class Headbanging_Triples(object):
"""Generate a pseudo spatial weights instance that contains headbanging triples
Parameters
----------
data : array (n, 2)
numpy array of x, y coordinates
w : spatial weights instance
k : integer number of nearest neighbors
t : integer
the number of triples
angle : integer between 0 and 180
the angle criterium for a set of triples
edgecorr : boolean
whether or not correction for edge points is made
Attributes
----------
triples : dictionary
key is observation record id, value is a list of lists of triple ids
extra : dictionary
key is observation record id, value is a list of the following:
tuple of original triple observations
distance between original triple observations
distance between an original triple observation and its extrapolated point
Examples
--------
importing k-nearest neighbor weights creator
>>> import libpysal # doctest: +SKIP
Reading data in stl_hom.csv into stl_db to extract values
for event and population-at-risk variables
>>> stl_db = libpysal.io.open(libpysal.examples.get_path('stl_hom.csv'),'r') # doctest: +SKIP
Reading the stl data in the WKT format so that
we can easily extract polygon centroids
>>> from libpysal.io.util.wkt import WKTParser # doctest: +SKIP
>>> fromWKT = WKTParser() # doctest: +SKIP
>>> stl_db.cast('WKT',fromWKT) # doctest: +SKIP
Extracting polygon centroids through iteration
>>> d = np.array([i.centroid for i in stl_db[:,0]]) # doctest: +SKIP
Using the centroids, we create a 5-nearst neighbor weights
>>> w = libpysal.weights.KNN(d,k=5) # doctest: +SKIP
Ensuring that the elements in the spatial weights instance are ordered
by the order of stl_db's IDs
>>> if not w.id_order_set: w.id_order = w.id_order # doctest: +SKIP
Finding headbaning triples by using 5 nearest neighbors
>>> ht = Headbanging_Triples(d,w,k=5) # doctest: +SKIP
Checking the members of triples
>>> for k, item in ht.triples.items()[:5]: print(k, item) # doctest: +SKIP
0 [(5, 6), (10, 6)]
1 [(4, 7), (4, 14), (9, 7)]
2 [(0, 8), (10, 3), (0, 6)]
3 [(4, 2), (2, 12), (8, 4)]
4 [(8, 1), (12, 1), (8, 9)]
Opening sids2.shp file
>>> import libpysal
>>> sids = libpysal.io.open(libpysal.examples.get_path('sids2.shp'),'r') # doctest: +SKIP
Extracting the centroids of polygons in the sids data
>>> sids_d = np.array([i.centroid for i in sids]) # doctest: +SKIP
Creating a 5-nearest neighbors weights from the sids centroids
>>> sids_w = libpysal.weights.KNN(sids_d,k=5) # doctest: +SKIP
Ensuring that the members in sids_w are ordered by
the order of sids_d's ID
>>> if not sids_w.id_order_set: sids_w.id_order = sids_w.id_order # doctest: +SKIP
Finding headbaning triples by using 5 nearest neighbors
>>> s_ht = Headbanging_Triples(sids_d,sids_w,k=5) # doctest: +SKIP
Checking the members of the found triples
>>> for k, item in s_ht.triples.items()[:5]: print(k, item) # doctest: +SKIP
0 [(1, 18), (1, 21), (1, 33)]
1 [(2, 40), (2, 22), (22, 40)]
2 [(39, 22), (1, 9), (39, 17)]
3 [(16, 6), (19, 6), (20, 6)]
4 [(5, 15), (27, 15), (35, 15)]
Finding headbanging triples by using 5 nearest neighbors with edge correction
>>> s_ht2 = Headbanging_Triples(sids_d,sids_w,k=5,edgecor=True) # doctest: +SKIP
Checking the members of the found triples
>>> for k, item in s_ht2.triples.items()[:5]: print(k, item) # doctest: +SKIP
0 [(1, 18), (1, 21), (1, 33)]
1 [(2, 40), (2, 22), (22, 40)]
2 [(39, 22), (1, 9), (39, 17)]
3 [(16, 6), (19, 6), (20, 6)]
4 [(5, 15), (27, 15), (35, 15)]
Checking the extrapolated point that is introduced into the triples
during edge correction
>>> extrapolated = s_ht2.extra[72] # doctest: +SKIP
Checking the observation IDs constituting the extrapolated triple
>>> extrapolated[0] # doctest: +SKIP
(89, 77)
Checking the distances between the extraploated point and the observation 89 and 77
>>> round(extrapolated[1],5), round(extrapolated[2],6) # doctest: +SKIP
(0.33753, 0.302707)
"""
def __init__(self, data, w, k=5, t=3, angle=135.0, edgecor=False):
raise DeprecationWarning('Deprecated')
if k < 3:
raise ValueError("w should be NeareastNeighbors instance & the number of neighbors should be more than 3.")
if not w.id_order_set:
raise ValueError("w id_order must be set to align with the order of data")
self.triples, points = {}, {}
for i, pnt in enumerate(data):
ng = w.neighbor_offsets[i]
points[(i, Point(pnt))] = dict(list(zip(ng, [Point(d)
for d in data[ng]])))
for i, pnt in list(points.keys()):
ng = points[(i, pnt)]
tr, tr_dis = {}, []
for c in comb(list(ng.keys()), 2):
p2, p3 = ng[c[0]], ng[c[-1]]
ang = get_angle_between(Ray(pnt, p2), Ray(pnt, p3))
if ang > angle or (ang < 0.0 and ang + 360 > angle):
tr[tuple(c)] = (p2, p3)
if len(tr) > t:
for c in list(tr.keys()):
p2, p3 = tr[c]
tr_dis.append((get_segment_point_dist(
LineSegment(p2, p3), pnt), c))
tr_dis = sorted(tr_dis)[:t]
self.triples[i] = [trp for dis, trp in tr_dis]
else:
self.triples[i] = list(tr.keys())
if edgecor:
self.extra = {}
ps = dict([(p, i) for i, p in list(points.keys())])
chull = convex_hull(list(ps.keys()))
chull = [p for p in chull if len(self.triples[ps[p]]) == 0]
for point in chull:
key = (ps[point], point)
ng = points[key]
ng_dist = [(get_points_dist(point, p), p) for p in list(ng.values())]
ng_dist_s = sorted(ng_dist, reverse=True)
extra = None
while extra is None and len(ng_dist_s) > 0:
p2 = ng_dist_s.pop()[-1]
p3s = list(ng.values())
p3s.remove(p2)
for p3 in p3s:
dist_p2_p3 = get_points_dist(p2, p3)
dist_p_p2 = get_points_dist(point, p2)
dist_p_p3 = get_points_dist(point, p3)
if dist_p_p2 <= dist_p_p3:
ray1, ray2, s_pnt, dist, c = Ray(p2, point), Ray(p2, p3), p2, dist_p_p2, (ps[p2], ps[p3])
else:
ray1, ray2, s_pnt, dist, c = Ray(p3, point), Ray(p3, p2), p3, dist_p_p3, (ps[p3], ps[p2])
ang = get_angle_between(ray1, ray2)
if ang >= 90 + angle / 2 or (ang < 0 and ang + 360 >= 90 + angle / 2):
ex_point = get_point_at_angle_and_dist(
ray1, angle, dist)
extra = [c, dist_p2_p3, get_points_dist(
s_pnt, ex_point)]
break
self.triples[ps[point]].append(extra[0])
self.extra[ps[point]] = extra
class Headbanging_Median_Rate(object):
"""Headbaning Median Rate Smoothing
Parameters
----------
e : array (n, 1)
event variable measured across n spatial units
b : array (n, 1)
population at risk variable measured across n spatial units
t : Headbanging_Triples instance
aw : array (n, 1)
auxilliary weight variable measured across n spatial units
iteration : integer
the number of iterations
Attributes
----------
r : array (n, 1)
rate values from headbanging median smoothing
Examples
--------
>>> import libpysal # doctest: +SKIP
opening the sids2 shapefile
>>> sids = libpysal.io.open(libpysal.examples.get_path('sids2.shp'), 'r') # doctest: +SKIP
extracting the centroids of polygons in the sids2 data
>>> sids_d = np.array([i.centroid for i in sids]) # doctest: +SKIP
creating a 5-nearest neighbors weights from the centroids
>>> sids_w = libpysal.weights.KNN(sids_d,k=5) # doctest: +SKIP
ensuring that the members in sids_w are ordered
>>> if not sids_w.id_order_set: sids_w.id_order = sids_w.id_order # doctest: +SKIP
finding headbanging triples by using 5 neighbors
return outdf
>>> s_ht = Headbanging_Triples(sids_d,sids_w,k=5) # doctest: +SKIP
DeprecationWarning: Deprecated
reading in the sids2 data table
>>> sids_db = libpysal.io.open(libpysal.examples.get_path('sids2.dbf'), 'r') # doctest: +SKIP
extracting the 10th and 9th columns in the sids2.dbf and
using data values as event and population-at-risk variables
>>> s_e, s_b = np.array(sids_db[:,9]), np.array(sids_db[:,8]) # doctest: +SKIP
computing headbanging median rates from s_e, s_b, and s_ht
>>> sids_hb_r = Headbanging_Median_Rate(s_e,s_b,s_ht) # doctest: +SKIP
extracting the computed rates through the property r of the Headbanging_Median_Rate instance
>>> sids_hb_r.r[:5] # doctest: +SKIP
array([ 0.00075586, 0. , 0.0008285 , 0.0018315 , 0.00498891])
recomputing headbanging median rates with 5 iterations
>>> sids_hb_r2 = Headbanging_Median_Rate(s_e,s_b,s_ht,iteration=5) # doctest: +SKIP
extracting the computed rates through the property r of the Headbanging_Median_Rate instance
>>> sids_hb_r2.r[:5] # doctest: +SKIP
array([ 0.0008285 , 0.00084331, 0.00086896, 0.0018315 , 0.00498891])
recomputing headbanging median rates by considring a set of auxilliary weights
>>> sids_hb_r3 = Headbanging_Median_Rate(s_e,s_b,s_ht,aw=s_b) # doctest: +SKIP
extracting the computed rates through the property r of the Headbanging_Median_Rate instance
>>> sids_hb_r3.r[:5] # doctest: +SKIP
array([ 0.00091659, 0. , 0.00156838, 0.0018315 , 0.00498891])
"""
def __init__(self, e, b, t, aw=None, iteration=1):
raise DeprecationWarning('Deprecated')
self.r = e * 1.0 / b
self.tr, self.aw = t.triples, aw
if hasattr(t, 'extra'):
self.extra = t.extra
while iteration:
self.__search_headbanging_median()
iteration -= 1
def __get_screens(self, id, triples, weighted=False):
r, tr = self.r, self.tr
if len(triples) == 0:
return r[id]
if hasattr(self, 'extra') and id in self.extra:
extra = self.extra
trp_r = r[list(triples[0])]
# observed rate
# plus difference in rate scaled by ratio of extrapolated distance
# & observed distance.
trp_r[-1] = trp_r[0] + (trp_r[0] - trp_r[-1]) * (
extra[id][-1] * 1.0 / extra[id][1])
trp_r = sorted(trp_r)
if not weighted:
return r[id], trp_r[0], trp_r[-1]
else:
trp_aw = self.aw[triples[0]]
extra_w = trp_aw[0] + (trp_aw[0] - trp_aw[-
1]) * (extra[id][-1] * 1.0 / extra[id][1])
return r[id], trp_r[0], trp_r[-1], self.aw[id], trp_aw[0] + extra_w
if not weighted:
lowest, highest = [], []
for trp in triples:
trp_r = np.sort(r[list(trp)])
lowest.append(trp_r[0])
highest.append(trp_r[-1])
return r[id], np.median(np.array(lowest)), np.median(np.array(highest))
if weighted:
lowest, highest = [], []
lowest_aw, highest_aw = [], []
for trp in triples:
trp_r = r[list(trp)]
dtype = [('r', '%s' % trp_r.dtype), ('w',
'%s' % self.aw.dtype)]
trp_r = np.array(list(zip(trp_r, list(trp))), dtype=dtype)
trp_r.sort(order='r')
lowest.append(trp_r['r'][0])
highest.append(trp_r['r'][-1])
lowest_aw.append(self.aw[int(round(trp_r['w'][0]))])
highest_aw.append(self.aw[int(round(trp_r['w'][-1]))])
wm_lowest = weighted_median(np.array(lowest), np.array(lowest_aw))
wm_highest = weighted_median(
np.array(highest), np.array(highest_aw))
triple_members = flatten(triples, unique=False)
return r[id], wm_lowest, wm_highest, self.aw[id] * len(triples), self.aw[triple_members].sum()
def __get_median_from_screens(self, screens):
if isinstance(screens, float):
return screens
elif len(screens) == 3:
return np.median(np.array(screens))
elif len(screens) == 5:
rk, wm_lowest, wm_highest, w1, w2 = screens
if rk >= wm_lowest and rk <= wm_highest:
return rk
elif rk < wm_lowest and w1 < w2:
return wm_lowest
elif rk > wm_highest and w1 < w2:
return wm_highest
else:
return rk
def __search_headbanging_median(self):
r, tr = self.r, self.tr
new_r = []
for k in list(tr.keys()):
screens = self.__get_screens(
k, tr[k], weighted=(self.aw is not None))
new_r.append(self.__get_median_from_screens(screens))
self.r = np.array(new_r)
@_requires('pandas')
@classmethod
def by_col(cls, df, e, b, t=None, geom_col='geometry', inplace=False, **kwargs):
"""
Compute smoothing by columns in a dataframe. The bounding box and point
information is computed from the geometry column.
Parameters
-----------
df : pandas.DataFrame
a dataframe containing the data to be smoothed
e : string or list of strings
the name or names of columns containing event variables to be
smoothed
b : string or list of strings
the name or names of columns containing the population
variables to be smoothed
t : Headbanging_Triples instance or list of Headbanging_Triples
list of headbanging triples instances. If not provided, this
is computed from the geometry column of the dataframe.
geom_col: string
the name of the column in the dataframe containing the
geometry information.
inplace : bool
a flag denoting whether to output a copy of `df` with the
relevant smoothed columns appended, or to append the columns
directly to `df` itself.
**kwargs: optional keyword arguments
optional keyword options that are passed directly to the
smoother.
Returns
---------
a new dataframe containing the smoothed Headbanging Median Rates for the
event/population pairs. If done inplace, there is no return value and
`df` is modified in place.
"""
import pandas as pd
if not inplace:
new = df.copy()
cls.by_col(new, e, b, t=t, geom_col=geom_col, inplace=True, **kwargs)
return new
import pandas as pd
# prep for application over multiple event/population pairs
if isinstance(e, str):
e = [e]
if isinstance(b, str):
b = [b]
if len(e) > len(b):
b = b * len(e)
data = get_points_array(df[geom_col])
#Headbanging_Triples doesn't take **kwargs, so filter its arguments
# (self, data, w, k=5, t=3, angle=135.0, edgecor=False):
w = kwargs.pop('w', None)
if w is None:
found = False
for k in df._metadata:
w = df.__dict__.get(w, None)
if isinstance(w, W):
found = True
if not found:
raise Exception('Weights not provided and no weights attached to frame!'
' Please provide a weight or attach a weight to the'
' dataframe')
k = kwargs.pop('k', 5)
t = kwargs.pop('t', 3)
angle = kwargs.pop('angle', 135.0)
edgecor = kwargs.pop('edgecor', False)
hbt = Headbanging_Triples(data, w, k=k, t=t, angle=angle,
edgecor=edgecor)
res = []
for ename, bname in zip(e, b):
r = cls(df[ename], df[bname], hbt, **kwargs).r
name = '_'.join(('-'.join((ename, bname)), cls.__name__.lower()))
df[name] = r
|
pysal/esda | esda/smoothing.py | _Smoother.by_col | python | def by_col(cls, df, e,b, inplace=False, **kwargs):
if not inplace:
new = df.copy()
cls.by_col(new, e, b, inplace=True, **kwargs)
return new
if isinstance(e, str):
e = [e]
if isinstance(b, str):
b = [b]
if len(b) == 1 and len(e) > 1:
b = b * len(e)
try:
assert len(e) == len(b)
except AssertionError:
raise ValueError('There is no one-to-one mapping between event'
' variable and population at risk variable!')
for ei, bi in zip(e,b):
ename = ei
bname = bi
ei = df[ename]
bi = df[bname]
outcol = '_'.join(('-'.join((ename, bname)), cls.__name__.lower()))
df[outcol] = cls(ei, bi, **kwargs).r | Compute smoothing by columns in a dataframe.
Parameters
-----------
df : pandas.DataFrame
a dataframe containing the data to be smoothed
e : string or list of strings
the name or names of columns containing event variables to be
smoothed
b : string or list of strings
the name or names of columns containing the population
variables to be smoothed
inplace : bool
a flag denoting whether to output a copy of `df` with the
relevant smoothed columns appended, or to append the columns
directly to `df` itself.
**kwargs: optional keyword arguments
optional keyword options that are passed directly to the
smoother.
Returns
---------
a copy of `df` containing the columns. Or, if `inplace`, this returns
None, but implicitly adds columns to `df`. | train | https://github.com/pysal/esda/blob/2fafc6ec505e153152a86601d3e0fba080610c20/esda/smoothing.py#L579-L627 | [
"def by_col(cls, df, e,b, inplace=False, **kwargs):\n \"\"\"\n Compute smoothing by columns in a dataframe.\n\n Parameters\n -----------\n df : pandas.DataFrame\n a dataframe containing the data to be smoothed\n e : string or list of strings\n the name or n... | class _Smoother(object):
"""
This is a helper class that implements things that all smoothers should do.
Right now, the only thing that we need to propagate is the by_col function.
TBQH, most of these smoothers should be functions, not classes (aside from
maybe headbanging triples), since they're literally only inits + one
attribute.
"""
def __init__(self):
pass
@classmethod
|
pysal/esda | esda/smoothing.py | _Spatial_Smoother.by_col | python | def by_col(cls, df, e,b, w=None, inplace=False, **kwargs):
if not inplace:
new = df.copy()
cls.by_col(new, e, b, w=w, inplace=True, **kwargs)
return new
if isinstance(e, str):
e = [e]
if isinstance(b, str):
b = [b]
if w is None:
found = False
for k in df._metadata:
w = df.__dict__.get(w, None)
if isinstance(w, W):
found = True
if not found:
raise Exception('Weights not provided and no weights attached to frame!'
' Please provide a weight or attach a weight to the'
' dataframe')
if isinstance(w, W):
w = [w] * len(e)
if len(b) == 1 and len(e) > 1:
b = b * len(e)
try:
assert len(e) == len(b)
except AssertionError:
raise ValueError('There is no one-to-one mapping between event'
' variable and population at risk variable!')
for ei, bi, wi in zip(e, b, w):
ename = ei
bname = bi
ei = df[ename]
bi = df[bname]
outcol = '_'.join(('-'.join((ename, bname)), cls.__name__.lower()))
df[outcol] = cls(ei, bi, w=wi, **kwargs).r | Compute smoothing by columns in a dataframe.
Parameters
-----------
df : pandas.DataFrame
a dataframe containing the data to be smoothed
e : string or list of strings
the name or names of columns containing event variables to be
smoothed
b : string or list of strings
the name or names of columns containing the population
variables to be smoothed
w : pysal.weights.W or list of pysal.weights.W
the spatial weights object or objects to use with the
event-population pairs. If not provided and a weights object
is in the dataframe's metadata, that weights object will be
used.
inplace : bool
a flag denoting whether to output a copy of `df` with the
relevant smoothed columns appended, or to append the columns
directly to `df` itself.
**kwargs: optional keyword arguments
optional keyword options that are passed directly to the
smoother.
Returns
---------
a copy of `df` containing the columns. Or, if `inplace`, this returns
None, but implicitly adds columns to `df`. | train | https://github.com/pysal/esda/blob/2fafc6ec505e153152a86601d3e0fba080610c20/esda/smoothing.py#L759-L824 | [
"def by_col(cls, df, e,b, w=None, inplace=False, **kwargs):\n \"\"\"\n Compute smoothing by columns in a dataframe.\n\n Parameters\n -----------\n df : pandas.DataFrame\n a dataframe containing the data to be smoothed\n e : string or list of strings\n the n... | class _Spatial_Smoother(_Smoother):
"""
This is a helper class that implements things that all the things that
spatial smoothers should do.
.
Right now, the only thing that we need to propagate is the by_col function.
TBQH, most of these smoothers should be functions, not classes (aside from
maybe headbanging triples), since they're literally only inits + one
attribute.
"""
def __init__(self):
pass
@classmethod
|
pysal/esda | esda/smoothing.py | Age_Adjusted_Smoother.by_col | python | def by_col(cls, df, e,b, w=None, s=None, **kwargs):
if s is None:
raise Exception('Standard population variable "s" must be supplied.')
import pandas as pd
if isinstance(e, str):
e = [e]
if isinstance(b, str):
b = [b]
if isinstance(s, str):
s = [s]
if w is None:
found = False
for k in df._metadata:
w = df.__dict__.get(w, None)
if isinstance(w, W):
found = True
break
if not found:
raise Exception('Weights not provided and no weights attached to frame!'
' Please provide a weight or attach a weight to the'
' dataframe.')
if isinstance(w, W):
w = [w] * len(e)
if not all(isinstance(wi, W) for wi in w):
raise Exception('Weights object must be an instance of '
' libpysal.weights.W!')
b = b * len(e) if len(b) == 1 and len(e) > 1 else b
s = s * len(e) if len(s) == 1 and len(e) > 1 else s
try:
assert len(e) == len(b)
assert len(e) == len(s)
assert len(e) == len(w)
except AssertionError:
raise ValueError('There is no one-to-one mapping between event'
' variable and population at risk variable, and '
' standard population variable, and spatial '
' weights!')
rdf = []
max_len = 0
for ei, bi, wi, si in zip(e, b, w, s):
ename = ei
bname = bi
h = len(ei) // wi.n
outcol = '_'.join(('-'.join((ename, bname)), cls.__name__.lower()))
this_r = cls(df[ei], df[bi], w=wi, s=df[si], **kwargs).r
max_len = 0 if len(this_r) > max_len else max_len
rdf.append((outcol, this_r.tolist()))
padded = (r[1] + [None] * max_len for r in rdf)
rdf = list(zip((r[0] for r in rdf), padded))
rdf = pd.DataFrame.from_items(rdf)
return rdf | Compute smoothing by columns in a dataframe.
Parameters
-----------
df : pandas.DataFrame
a dataframe containing the data to be smoothed
e : string or list of strings
the name or names of columns containing event variables to be
smoothed
b : string or list of strings
the name or names of columns containing the population
variables to be smoothed
w : pysal.weights.W or list of pysal.weights.W
the spatial weights object or objects to use with the
event-population pairs. If not provided and a weights object
is in the dataframe's metadata, that weights object will be
used.
s : string or list of strings
the name or names of columns to use as a standard population
variable for the events `e` and at-risk populations `b`.
inplace : bool
a flag denoting whether to output a copy of `df` with the
relevant smoothed columns appended, or to append the columns
directly to `df` itself.
**kwargs: optional keyword arguments
optional keyword options that are passed directly to the
smoother.
Returns
---------
a copy of `df` containing the columns. Or, if `inplace`, this returns
None, but implicitly adds columns to `df`. | train | https://github.com/pysal/esda/blob/2fafc6ec505e153152a86601d3e0fba080610c20/esda/smoothing.py#L1124-L1208 | null | class Age_Adjusted_Smoother(_Spatial_Smoother):
"""Age-adjusted rate smoothing
Parameters
----------
e : array (n*h, 1)
event variable measured for each age group across n spatial units
b : array (n*h, 1)
population at risk variable measured for each age group across n spatial units
w : spatial weights instance
s : array (n*h, 1)
standard population for each age group across n spatial units
Attributes
----------
r : array (n, 1)
rate values from spatial rate smoothing
Notes
-----
Weights used to smooth age-specific events and populations are simple binary weights
Examples
--------
Creating an array including 12 values for the 6 regions with 2 age groups
>>> e = np.array([10, 8, 1, 4, 3, 5, 4, 3, 2, 1, 5, 3])
Creating another array including 12 population-at-risk values for the 6 regions
>>> b = np.array([100, 90, 15, 30, 25, 20, 30, 20, 80, 80, 90, 60])
For age adjustment, we need another array of values containing standard population
s includes standard population data for the 6 regions
>>> s = np.array([98, 88, 15, 29, 20, 23, 33, 25, 76, 80, 89, 66])
Creating a list containing geographic coordinates of the 6 regions' centroids
>>> points=[(10, 10), (20, 10), (40, 10), (15, 20), (30, 20), (30, 30)]
Creating a kernel-based spatial weights instance by using the above points
>>> kw=Kernel(points)
Ensuring that the elements in the kernel-based weights are ordered
by the given sequential numbers from 0 to 5
>>> if not kw.id_order_set: kw.id_order = range(0,len(points))
Applying age-adjusted smoothing to e and b
>>> ar = Age_Adjusted_Smoother(e, b, kw, s)
Extracting the smoothed rates through the property r of the Age_Adjusted_Smoother instance
>>> ar.r
array([0.10519625, 0.08494318, 0.06440072, 0.06898604, 0.06952076,
0.05020968])
"""
def __init__(self, e, b, w, s, alpha=0.05):
e = np.asarray(e).reshape(-1, 1)
b = np.asarray(b).reshape(-1, 1)
s = np.asarray(s).flatten()
t = len(e)
h = t // w.n
w.transform = 'b'
e_n, b_n = [], []
for i in range(h):
e_n.append(slag(w, e[i::h]).tolist())
b_n.append(slag(w, b[i::h]).tolist())
e_n = np.array(e_n).reshape((1, t), order='F')[0]
b_n = np.array(b_n).reshape((1, t), order='F')[0]
e_n = e_n.reshape(s.shape)
b_n = b_n.reshape(s.shape)
r = direct_age_standardization(e_n, b_n, s, w.n, alpha=alpha)
self.r = np.array([i[0] for i in r])
w.transform = 'o'
@_requires('pandas')
@classmethod
|
pysal/esda | esda/smoothing.py | Spatial_Filtering.by_col | python | def by_col(cls, df, e, b, x_grid, y_grid, geom_col='geometry', **kwargs):
import pandas as pd
# prep for application over multiple event/population pairs
if isinstance(e, str):
e = [e]
if isinstance(b, str):
b = [b]
if len(e) > len(b):
b = b * len(e)
if isinstance(x_grid, (int, float)):
x_grid = [x_grid] * len(e)
if isinstance(y_grid, (int, float)):
y_grid = [y_grid] * len(e)
bbox = get_bounding_box(df[geom_col])
bbox = [[bbox.left, bbox.lower], [bbox.right, bbox.upper]]
data = get_points_array(df[geom_col])
res = []
for ename, bname, xgi, ygi in zip(e, b, x_grid, y_grid):
r = cls(bbox, data, df[ename], df[bname], xgi, ygi, **kwargs)
grid = np.asarray(r.grid).reshape(-1,2)
name = '_'.join(('-'.join((ename, bname)), cls.__name__.lower()))
colnames = ('_'.join((name, suffix)) for suffix in ['X', 'Y', 'R'])
items = [(name, col) for name,col in zip(colnames, [grid[:,0],
grid[:,1],
r.r])]
res.append(pd.DataFrame.from_items(items))
outdf = pd.concat(res)
return outdf | Compute smoothing by columns in a dataframe. The bounding box and point
information is computed from the geometry column.
Parameters
-----------
df : pandas.DataFrame
a dataframe containing the data to be smoothed
e : string or list of strings
the name or names of columns containing event variables to be
smoothed
b : string or list of strings
the name or names of columns containing the population
variables to be smoothed
x_grid : integer
number of grid cells to use along the x-axis
y_grid : integer
number of grid cells to use along the y-axis
geom_col: string
the name of the column in the dataframe containing the
geometry information.
**kwargs: optional keyword arguments
optional keyword options that are passed directly to the
smoother.
Returns
---------
a new dataframe of dimension (x_grid*y_grid, 3), containing the
coordinates of the grid cells and the rates associated with those grid
cells. | train | https://github.com/pysal/esda/blob/2fafc6ec505e153152a86601d3e0fba080610c20/esda/smoothing.py#L1526-L1584 | null | class Spatial_Filtering(_Smoother):
"""Spatial Filtering
Parameters
----------
bbox : a list of two lists where each list is a pair of coordinates
a bounding box for the entire n spatial units
data : array (n, 2)
x, y coordinates
e : array (n, 1)
event variable measured across n spatial units
b : array (n, 1)
population at risk variable measured across n spatial units
x_grid : integer
the number of cells on x axis
y_grid : integer
the number of cells on y axis
r : float
fixed radius of a moving window
pop : integer
population threshold to create adaptive moving windows
Attributes
----------
grid : array (x_grid*y_grid, 2)
x, y coordinates for grid points
r : array (x_grid*y_grid, 1)
rate values for grid points
Notes
-----
No tool is provided to find an optimal value for r or pop.
Examples
--------
Reading data in stl_hom.csv into stl to extract values
for event and population-at-risk variables
>>> import libpysal
>>> stl = libpysal.io.open(libpysal.examples.get_path('stl_hom.csv'), 'r')
Reading the stl data in the WKT format so that
we can easily extract polygon centroids
>>> from libpysal.io.util.wkt import WKTParser
>>> fromWKT = WKTParser()
>>> stl.cast('WKT',fromWKT)
Extracting polygon centroids through iteration
>>> d = np.array([i.centroid for i in stl[:,0]])
Specifying the bounding box for the stl_hom data.
The bbox should includes two points for the left-bottom and the right-top corners
>>> bbox = [[-92.700676, 36.881809], [-87.916573, 40.3295669]]
The 11th and 14th columns in stl_hom.csv includes the number of homocides and population.
Creating two arrays from these columns.
>>> stl_e, stl_b = np.array(stl[:,10]), np.array(stl[:,13])
Applying spatial filtering by using a 10*10 mesh grid and a moving window
with 2 radius
>>> sf_0 = Spatial_Filtering(bbox,d,stl_e,stl_b,10,10,r=2)
Extracting the resulting rates through the property r of the Spatial_Filtering instance
>>> sf_0.r[:10]
array([4.23561763e-05, 4.45290850e-05, 4.56456221e-05, 4.49133384e-05,
4.39671835e-05, 4.44903042e-05, 4.19845497e-05, 4.11936548e-05,
3.93463504e-05, 4.04376345e-05])
Applying another spatial filtering by allowing the moving window to grow until
600000 people are found in the window
>>> sf = Spatial_Filtering(bbox,d,stl_e,stl_b,10,10,pop=600000)
Checking the size of the reulting array including the rates
>>> sf.r.shape
(100,)
Extracting the resulting rates through the property r of the Spatial_Filtering instance
>>> sf.r[:10]
array([3.73728738e-05, 4.04456300e-05, 4.04456300e-05, 3.81035327e-05,
4.54831940e-05, 4.54831940e-05, 3.75658628e-05, 3.75658628e-05,
3.75658628e-05, 3.75658628e-05])
"""
def __init__(self, bbox, data, e, b, x_grid, y_grid, r=None, pop=None):
e= np.asarray(e).reshape(-1,1)
b= np.asarray(b).reshape(-1,1)
data_tree = KDTree(data)
x_range = bbox[1][0] - bbox[0][0]
y_range = bbox[1][1] - bbox[0][1]
x, y = np.mgrid[bbox[0][0]:bbox[1][0]:float(x_range) / x_grid,
bbox[0][1]:bbox[1][1]:float(y_range) / y_grid]
self.grid = list(zip(x.ravel(), y.ravel()))
self.r = []
if r is None and pop is None:
raise ValueError("Either r or pop should not be None")
if r is not None:
pnts_in_disk = data_tree.query_ball_point(self.grid, r=r)
for i in pnts_in_disk:
r = e[i].sum() * 1.0 / b[i].sum()
self.r.append(r)
if pop is not None:
half_nearest_pnts = data_tree.query(self.grid, k=len(e))[1]
for i in half_nearest_pnts:
e_n, b_n = e[i].cumsum(), b[i].cumsum()
b_n_filter = b_n <= pop
e_n_f, b_n_f = e_n[b_n_filter], b_n[b_n_filter]
if len(e_n_f) == 0:
e_n_f = e_n[[0]]
b_n_f = b_n[[0]]
self.r.append(e_n_f[-1] * 1.0 / b_n_f[-1])
self.r = np.array(self.r)
@_requires('pandas')
@classmethod
|
pysal/esda | esda/smoothing.py | Headbanging_Median_Rate.by_col | python | def by_col(cls, df, e, b, t=None, geom_col='geometry', inplace=False, **kwargs):
import pandas as pd
if not inplace:
new = df.copy()
cls.by_col(new, e, b, t=t, geom_col=geom_col, inplace=True, **kwargs)
return new
import pandas as pd
# prep for application over multiple event/population pairs
if isinstance(e, str):
e = [e]
if isinstance(b, str):
b = [b]
if len(e) > len(b):
b = b * len(e)
data = get_points_array(df[geom_col])
#Headbanging_Triples doesn't take **kwargs, so filter its arguments
# (self, data, w, k=5, t=3, angle=135.0, edgecor=False):
w = kwargs.pop('w', None)
if w is None:
found = False
for k in df._metadata:
w = df.__dict__.get(w, None)
if isinstance(w, W):
found = True
if not found:
raise Exception('Weights not provided and no weights attached to frame!'
' Please provide a weight or attach a weight to the'
' dataframe')
k = kwargs.pop('k', 5)
t = kwargs.pop('t', 3)
angle = kwargs.pop('angle', 135.0)
edgecor = kwargs.pop('edgecor', False)
hbt = Headbanging_Triples(data, w, k=k, t=t, angle=angle,
edgecor=edgecor)
res = []
for ename, bname in zip(e, b):
r = cls(df[ename], df[bname], hbt, **kwargs).r
name = '_'.join(('-'.join((ename, bname)), cls.__name__.lower()))
df[name] = r | Compute smoothing by columns in a dataframe. The bounding box and point
information is computed from the geometry column.
Parameters
-----------
df : pandas.DataFrame
a dataframe containing the data to be smoothed
e : string or list of strings
the name or names of columns containing event variables to be
smoothed
b : string or list of strings
the name or names of columns containing the population
variables to be smoothed
t : Headbanging_Triples instance or list of Headbanging_Triples
list of headbanging triples instances. If not provided, this
is computed from the geometry column of the dataframe.
geom_col: string
the name of the column in the dataframe containing the
geometry information.
inplace : bool
a flag denoting whether to output a copy of `df` with the
relevant smoothed columns appended, or to append the columns
directly to `df` itself.
**kwargs: optional keyword arguments
optional keyword options that are passed directly to the
smoother.
Returns
---------
a new dataframe containing the smoothed Headbanging Median Rates for the
event/population pairs. If done inplace, there is no return value and
`df` is modified in place. | train | https://github.com/pysal/esda/blob/2fafc6ec505e153152a86601d3e0fba080610c20/esda/smoothing.py#L1958-L2035 | null | class Headbanging_Median_Rate(object):
"""Headbaning Median Rate Smoothing
Parameters
----------
e : array (n, 1)
event variable measured across n spatial units
b : array (n, 1)
population at risk variable measured across n spatial units
t : Headbanging_Triples instance
aw : array (n, 1)
auxilliary weight variable measured across n spatial units
iteration : integer
the number of iterations
Attributes
----------
r : array (n, 1)
rate values from headbanging median smoothing
Examples
--------
>>> import libpysal # doctest: +SKIP
opening the sids2 shapefile
>>> sids = libpysal.io.open(libpysal.examples.get_path('sids2.shp'), 'r') # doctest: +SKIP
extracting the centroids of polygons in the sids2 data
>>> sids_d = np.array([i.centroid for i in sids]) # doctest: +SKIP
creating a 5-nearest neighbors weights from the centroids
>>> sids_w = libpysal.weights.KNN(sids_d,k=5) # doctest: +SKIP
ensuring that the members in sids_w are ordered
>>> if not sids_w.id_order_set: sids_w.id_order = sids_w.id_order # doctest: +SKIP
finding headbanging triples by using 5 neighbors
return outdf
>>> s_ht = Headbanging_Triples(sids_d,sids_w,k=5) # doctest: +SKIP
DeprecationWarning: Deprecated
reading in the sids2 data table
>>> sids_db = libpysal.io.open(libpysal.examples.get_path('sids2.dbf'), 'r') # doctest: +SKIP
extracting the 10th and 9th columns in the sids2.dbf and
using data values as event and population-at-risk variables
>>> s_e, s_b = np.array(sids_db[:,9]), np.array(sids_db[:,8]) # doctest: +SKIP
computing headbanging median rates from s_e, s_b, and s_ht
>>> sids_hb_r = Headbanging_Median_Rate(s_e,s_b,s_ht) # doctest: +SKIP
extracting the computed rates through the property r of the Headbanging_Median_Rate instance
>>> sids_hb_r.r[:5] # doctest: +SKIP
array([ 0.00075586, 0. , 0.0008285 , 0.0018315 , 0.00498891])
recomputing headbanging median rates with 5 iterations
>>> sids_hb_r2 = Headbanging_Median_Rate(s_e,s_b,s_ht,iteration=5) # doctest: +SKIP
extracting the computed rates through the property r of the Headbanging_Median_Rate instance
>>> sids_hb_r2.r[:5] # doctest: +SKIP
array([ 0.0008285 , 0.00084331, 0.00086896, 0.0018315 , 0.00498891])
recomputing headbanging median rates by considring a set of auxilliary weights
>>> sids_hb_r3 = Headbanging_Median_Rate(s_e,s_b,s_ht,aw=s_b) # doctest: +SKIP
extracting the computed rates through the property r of the Headbanging_Median_Rate instance
>>> sids_hb_r3.r[:5] # doctest: +SKIP
array([ 0.00091659, 0. , 0.00156838, 0.0018315 , 0.00498891])
"""
def __init__(self, e, b, t, aw=None, iteration=1):
raise DeprecationWarning('Deprecated')
self.r = e * 1.0 / b
self.tr, self.aw = t.triples, aw
if hasattr(t, 'extra'):
self.extra = t.extra
while iteration:
self.__search_headbanging_median()
iteration -= 1
def __get_screens(self, id, triples, weighted=False):
r, tr = self.r, self.tr
if len(triples) == 0:
return r[id]
if hasattr(self, 'extra') and id in self.extra:
extra = self.extra
trp_r = r[list(triples[0])]
# observed rate
# plus difference in rate scaled by ratio of extrapolated distance
# & observed distance.
trp_r[-1] = trp_r[0] + (trp_r[0] - trp_r[-1]) * (
extra[id][-1] * 1.0 / extra[id][1])
trp_r = sorted(trp_r)
if not weighted:
return r[id], trp_r[0], trp_r[-1]
else:
trp_aw = self.aw[triples[0]]
extra_w = trp_aw[0] + (trp_aw[0] - trp_aw[-
1]) * (extra[id][-1] * 1.0 / extra[id][1])
return r[id], trp_r[0], trp_r[-1], self.aw[id], trp_aw[0] + extra_w
if not weighted:
lowest, highest = [], []
for trp in triples:
trp_r = np.sort(r[list(trp)])
lowest.append(trp_r[0])
highest.append(trp_r[-1])
return r[id], np.median(np.array(lowest)), np.median(np.array(highest))
if weighted:
lowest, highest = [], []
lowest_aw, highest_aw = [], []
for trp in triples:
trp_r = r[list(trp)]
dtype = [('r', '%s' % trp_r.dtype), ('w',
'%s' % self.aw.dtype)]
trp_r = np.array(list(zip(trp_r, list(trp))), dtype=dtype)
trp_r.sort(order='r')
lowest.append(trp_r['r'][0])
highest.append(trp_r['r'][-1])
lowest_aw.append(self.aw[int(round(trp_r['w'][0]))])
highest_aw.append(self.aw[int(round(trp_r['w'][-1]))])
wm_lowest = weighted_median(np.array(lowest), np.array(lowest_aw))
wm_highest = weighted_median(
np.array(highest), np.array(highest_aw))
triple_members = flatten(triples, unique=False)
return r[id], wm_lowest, wm_highest, self.aw[id] * len(triples), self.aw[triple_members].sum()
def __get_median_from_screens(self, screens):
if isinstance(screens, float):
return screens
elif len(screens) == 3:
return np.median(np.array(screens))
elif len(screens) == 5:
rk, wm_lowest, wm_highest, w1, w2 = screens
if rk >= wm_lowest and rk <= wm_highest:
return rk
elif rk < wm_lowest and w1 < w2:
return wm_lowest
elif rk > wm_highest and w1 < w2:
return wm_highest
else:
return rk
def __search_headbanging_median(self):
r, tr = self.r, self.tr
new_r = []
for k in list(tr.keys()):
screens = self.__get_screens(
k, tr[k], weighted=(self.aw is not None))
new_r.append(self.__get_median_from_screens(screens))
self.r = np.array(new_r)
@_requires('pandas')
@classmethod
|
pysal/esda | esda/tabular.py | _univariate_handler | python | def _univariate_handler(df, cols, stat=None, w=None, inplace=True,
pvalue = 'sim', outvals = None, swapname='', **kwargs):
### Preprocess
if not inplace:
new_df = df.copy()
_univariate_handler(new_df, cols, stat=stat, w=w, pvalue=pvalue,
inplace=True, outvals=outvals,
swapname=swapname, **kwargs)
return new_df
if w is None:
for name in df._metadata:
this_obj = df.__dict__.get(name)
if isinstance(this_obj, W):
w = this_obj
if w is None:
raise Exception('Weights not provided and no weights attached to frame!'
' Please provide a weight or attach a weight to the'
' dataframe')
### Prep indexes
if outvals is None:
outvals = []
outvals.insert(0,'_statistic')
if pvalue.lower() in ['all', 'both', '*']:
raise NotImplementedError("If you want more than one type of PValue,add"
" the targeted pvalue type to outvals. For example:"
" Geary(df, cols=['HOVAL'], w=w, outvals=['p_z_sim', "
"'p_rand']")
# this is nontrivial, since we
# can't know which p_value types are on the object without computing it.
# This is because we don't flag them with @properties, so they're just
# arbitrarily assigned post-facto. One solution might be to post-process the
# objects, determine which pvalue types are available, and then grab them
# all if needed.
if pvalue is not '':
outvals.append('p_'+pvalue.lower())
if isinstance(cols, str):
cols = [cols]
### Make closure around weights & apply columnwise
def column_stat(column):
return stat(column.values, w=w, **kwargs)
stat_objs = df[cols].apply(column_stat)
### Assign into dataframe
for col in cols:
stat_obj = stat_objs[col]
y = kwargs.get('y')
if y is not None:
col += '-' + y.name
outcols = ['_'.join((col, val)) for val in outvals]
for colname, attname in zip(outcols, outvals):
df[colname] = stat_obj.__getattribute__(attname)
if swapname is not '':
df.columns = [_swap_ending(col, swapname) if col.endswith('_statistic') else col
for col in df.columns] | Compute a univariate descriptive statistic `stat` over columns `cols` in
`df`.
Parameters
----------
df : pandas.DataFrame
the dataframe containing columns to compute the descriptive
statistics
cols : string or list of strings
one or more names of columns in `df` to use to compute
exploratory descriptive statistics.
stat : callable
a function that takes data as a first argument and any number
of configuration keyword arguments and returns an object
encapsulating the exploratory statistic results
w : pysal.weights.W
the spatial weights object corresponding to the dataframe
inplace : bool
a flag denoting whether to add the statistic to the dataframe
in memory, or to construct a copy of the dataframe and append
the results to the copy
pvalue : string
the name of the pvalue on the results object wanted
outvals : list of strings
names of attributes of the dataframe to attempt to flatten
into a column
swapname : string
suffix to replace generic identifier with. Each caller of this
function should set this to a unique column suffix
**kwargs : optional keyword arguments
options that are passed directly to the statistic | train | https://github.com/pysal/esda/blob/2fafc6ec505e153152a86601d3e0fba080610c20/esda/tabular.py#L10-L98 | [
"def _univariate_handler(df, cols, stat=None, w=None, inplace=True,\n pvalue = 'sim', outvals = None, swapname='', **kwargs):\n \"\"\"\n Compute a univariate descriptive statistic `stat` over columns `cols` in\n `df`.\n\n Parameters\n ----------\n df : pandas.DataFr... | #from ...common import requires as _requires
import itertools as _it
from libpysal.weights import W
# I would like to define it like this, so that you could make a call like:
# Geary(df, 'HOVAL', 'INC', w=W), but this only works in Python3. So, I have to
# use a workaround
#def _statistic(df, *cols, stat=None, w=None, inplace=True,
def _bivariate_handler(df, x, y=None, w=None, inplace=True, pvalue='sim',
outvals=None, **kwargs):
"""
Compute a descriptive bivariate statistic over two sets of columns, `x` and
`y`, contained in `df`.
Parameters
----------
df : pandas.DataFrame
dataframe in which columns `x` and `y` are contained
x : string or list of strings
one or more column names to use as variates in the bivariate
statistics
y : string or list of strings
one or more column names to use as variates in the bivariate
statistics
w : pysal.weights.W
spatial weights object corresponding to the dataframe `df`
inplace : bool
a flag denoting whether to add the statistic to the dataframe
in memory, or to construct a copy of the dataframe and append
the results to the copy
pvalue : string
the name of the pvalue on the results object wanted
outvals : list of strings
names of attributes of the dataframe to attempt to flatten
into a column
swapname : string
suffix to replace generic identifier with. Each caller of this
function should set this to a unique column suffix
**kwargs : optional keyword arguments
options that are passed directly to the statistic
"""
real_swapname = kwargs.pop('swapname', '')
if isinstance(y, str):
y = [y]
if isinstance(x, str):
x = [x]
if not inplace:
new_df = df.copy()
_bivariate_handler(new_df, x, y=y, w=w, inplace=True,
swapname=real_swapname,
pvalue=pvalue, outvals=outvals, **kwargs)
return new_df
if y is None:
y = x
for xi,yi in _it.product(x,y):
if xi == yi:
continue
_univariate_handler(df, cols=xi, w=w, y=df[yi], inplace=True,
pvalue=pvalue, outvals=outvals, swapname='', **kwargs)
if real_swapname is not '':
df.columns = [_swap_ending(col, real_swapname)
if col.endswith('_statistic')
else col for col in df.columns]
def _swap_ending(s, ending, delim='_'):
"""
Replace the ending of a string, delimited into an arbitrary
number of chunks by `delim`, with the ending provided
Parameters
----------
s : string
string to replace endings
ending : string
string used to replace ending of `s`
delim : string
string that splits s into one or more parts
Returns
-------
new string where the final chunk of `s`, delimited by `delim`, is replaced
with `ending`.
"""
parts = [x for x in s.split(delim)[:-1] if x != '']
parts.append(ending)
return delim.join(parts)
##############
# DOCSTRINGS #
##############
_univ_doc_template =\
"""
Function to compute a {n} statistic on a dataframe
Arguments
---------
df : pandas.DataFrame
a pandas dataframe with a geometry column
cols : string or list of string
name or list of names of columns to use to compute the statistic
w : pysal weights object
a weights object aligned with the dataframe. If not provided, this
is searched for in the dataframe's metadata
inplace : bool
a boolean denoting whether to operate on the dataframe inplace or to
return a series contaning the results of the computation. If
operating inplace, the derived columns will be named 'column_{nl}'
pvalue : string
a string denoting which pvalue should be returned. Refer to the
the {n} statistic's documentation for available p-values
outvals : list of strings
list of arbitrary attributes to return as columns from the
{n} statistic
**stat_kws : keyword arguments
options to pass to the underlying statistic. For this, see the
documentation for the {n} statistic.
Returns
--------
If inplace, None, and operation is conducted on dataframe in memory. Otherwise,
returns a copy of the dataframe with the relevant columns attached.
See Also
---------
For further documentation, refer to the {n} class in pysal.esda
"""
_bv_doc_template =\
"""
Function to compute a {n} statistic on a dataframe
Arguments
---------
df : pandas.DataFrame
a pandas dataframe with a geometry column
X : list of strings
column name or list of column names to use as X values to compute
the bivariate statistic. If no Y is provided, pairwise comparisons
among these variates are used instead.
Y : list of strings
column name or list of column names to use as Y values to compute
the bivariate statistic. if no Y is provided, pariwise comparisons
among the X variates are used instead.
w : pysal weights object
a weights object aligned with the dataframe. If not provided, this
is searched for in the dataframe's metadata
inplace : bool
a boolean denoting whether to operate on the dataframe inplace or to
return a series contaning the results of the computation. If
operating inplace, the derived columns will be named 'column_{nl}'
pvalue : string
a string denoting which pvalue should be returned. Refer to the
the {n} statistic's documentation for available p-values
outvals : list of strings
list of arbitrary attributes to return as columns from the
{n} statistic
**stat_kws : keyword arguments
options to pass to the underlying statistic. For this, see the
documentation for the {n} statistic.
Returns
--------
If inplace, None, and operation is conducted on dataframe in memory. Otherwise,
returns a copy of the dataframe with the relevant columns attached.
See Also
---------
For further documentation, refer to the {n} class in pysal.esda
"""
_rate_doc_template =\
"""
Function to compute a {n} statistic on a dataframe
Arguments
---------
df : pandas.DataFrame
a pandas dataframe with a geometry column
events : string or list of strings
one or more names where events are stored
populations : string or list of strings
one or more names where the populations corresponding to the
events are stored. If one population column is provided, it is
used for all event columns. If more than one population column
is provided but there is not a population for every event
column, an exception will be raised.
w : pysal weights object
a weights object aligned with the dataframe. If not provided, this
is searched for in the dataframe's metadata
inplace : bool
a boolean denoting whether to operate on the dataframe inplace or to
return a series contaning the results of the computation. If
operating inplace, the derived columns will be named 'column_{nl}'
pvalue : string
a string denoting which pvalue should be returned. Refer to the
the {n} statistic's documentation for available p-values
outvals : list of strings
list of arbitrary attributes to return as columns from the
{n} statistic
**stat_kws : keyword arguments
options to pass to the underlying statistic. For this, see the
documentation for the {n} statistic.
Returns
--------
If inplace, None, and operation is conducted on dataframe in memory. Otherwise,
returns a copy of the dataframe with the relevant columns attached.
See Also
---------
For further documentation, refer to the {n} class in pysal.esda
"""
|
pysal/esda | esda/tabular.py | _bivariate_handler | python | def _bivariate_handler(df, x, y=None, w=None, inplace=True, pvalue='sim',
outvals=None, **kwargs):
real_swapname = kwargs.pop('swapname', '')
if isinstance(y, str):
y = [y]
if isinstance(x, str):
x = [x]
if not inplace:
new_df = df.copy()
_bivariate_handler(new_df, x, y=y, w=w, inplace=True,
swapname=real_swapname,
pvalue=pvalue, outvals=outvals, **kwargs)
return new_df
if y is None:
y = x
for xi,yi in _it.product(x,y):
if xi == yi:
continue
_univariate_handler(df, cols=xi, w=w, y=df[yi], inplace=True,
pvalue=pvalue, outvals=outvals, swapname='', **kwargs)
if real_swapname is not '':
df.columns = [_swap_ending(col, real_swapname)
if col.endswith('_statistic')
else col for col in df.columns] | Compute a descriptive bivariate statistic over two sets of columns, `x` and
`y`, contained in `df`.
Parameters
----------
df : pandas.DataFrame
dataframe in which columns `x` and `y` are contained
x : string or list of strings
one or more column names to use as variates in the bivariate
statistics
y : string or list of strings
one or more column names to use as variates in the bivariate
statistics
w : pysal.weights.W
spatial weights object corresponding to the dataframe `df`
inplace : bool
a flag denoting whether to add the statistic to the dataframe
in memory, or to construct a copy of the dataframe and append
the results to the copy
pvalue : string
the name of the pvalue on the results object wanted
outvals : list of strings
names of attributes of the dataframe to attempt to flatten
into a column
swapname : string
suffix to replace generic identifier with. Each caller of this
function should set this to a unique column suffix
**kwargs : optional keyword arguments
options that are passed directly to the statistic | train | https://github.com/pysal/esda/blob/2fafc6ec505e153152a86601d3e0fba080610c20/esda/tabular.py#L100-L154 | [
"def _univariate_handler(df, cols, stat=None, w=None, inplace=True,\n pvalue = 'sim', outvals = None, swapname='', **kwargs):\n \"\"\"\n Compute a univariate descriptive statistic `stat` over columns `cols` in\n `df`.\n\n Parameters\n ----------\n df : pandas.DataFr... | #from ...common import requires as _requires
import itertools as _it
from libpysal.weights import W
# I would like to define it like this, so that you could make a call like:
# Geary(df, 'HOVAL', 'INC', w=W), but this only works in Python3. So, I have to
# use a workaround
#def _statistic(df, *cols, stat=None, w=None, inplace=True,
def _univariate_handler(df, cols, stat=None, w=None, inplace=True,
pvalue = 'sim', outvals = None, swapname='', **kwargs):
"""
Compute a univariate descriptive statistic `stat` over columns `cols` in
`df`.
Parameters
----------
df : pandas.DataFrame
the dataframe containing columns to compute the descriptive
statistics
cols : string or list of strings
one or more names of columns in `df` to use to compute
exploratory descriptive statistics.
stat : callable
a function that takes data as a first argument and any number
of configuration keyword arguments and returns an object
encapsulating the exploratory statistic results
w : pysal.weights.W
the spatial weights object corresponding to the dataframe
inplace : bool
a flag denoting whether to add the statistic to the dataframe
in memory, or to construct a copy of the dataframe and append
the results to the copy
pvalue : string
the name of the pvalue on the results object wanted
outvals : list of strings
names of attributes of the dataframe to attempt to flatten
into a column
swapname : string
suffix to replace generic identifier with. Each caller of this
function should set this to a unique column suffix
**kwargs : optional keyword arguments
options that are passed directly to the statistic
"""
### Preprocess
if not inplace:
new_df = df.copy()
_univariate_handler(new_df, cols, stat=stat, w=w, pvalue=pvalue,
inplace=True, outvals=outvals,
swapname=swapname, **kwargs)
return new_df
if w is None:
for name in df._metadata:
this_obj = df.__dict__.get(name)
if isinstance(this_obj, W):
w = this_obj
if w is None:
raise Exception('Weights not provided and no weights attached to frame!'
' Please provide a weight or attach a weight to the'
' dataframe')
### Prep indexes
if outvals is None:
outvals = []
outvals.insert(0,'_statistic')
if pvalue.lower() in ['all', 'both', '*']:
raise NotImplementedError("If you want more than one type of PValue,add"
" the targeted pvalue type to outvals. For example:"
" Geary(df, cols=['HOVAL'], w=w, outvals=['p_z_sim', "
"'p_rand']")
# this is nontrivial, since we
# can't know which p_value types are on the object without computing it.
# This is because we don't flag them with @properties, so they're just
# arbitrarily assigned post-facto. One solution might be to post-process the
# objects, determine which pvalue types are available, and then grab them
# all if needed.
if pvalue is not '':
outvals.append('p_'+pvalue.lower())
if isinstance(cols, str):
cols = [cols]
### Make closure around weights & apply columnwise
def column_stat(column):
return stat(column.values, w=w, **kwargs)
stat_objs = df[cols].apply(column_stat)
### Assign into dataframe
for col in cols:
stat_obj = stat_objs[col]
y = kwargs.get('y')
if y is not None:
col += '-' + y.name
outcols = ['_'.join((col, val)) for val in outvals]
for colname, attname in zip(outcols, outvals):
df[colname] = stat_obj.__getattribute__(attname)
if swapname is not '':
df.columns = [_swap_ending(col, swapname) if col.endswith('_statistic') else col
for col in df.columns]
def _swap_ending(s, ending, delim='_'):
"""
Replace the ending of a string, delimited into an arbitrary
number of chunks by `delim`, with the ending provided
Parameters
----------
s : string
string to replace endings
ending : string
string used to replace ending of `s`
delim : string
string that splits s into one or more parts
Returns
-------
new string where the final chunk of `s`, delimited by `delim`, is replaced
with `ending`.
"""
parts = [x for x in s.split(delim)[:-1] if x != '']
parts.append(ending)
return delim.join(parts)
##############
# DOCSTRINGS #
##############
_univ_doc_template =\
"""
Function to compute a {n} statistic on a dataframe
Arguments
---------
df : pandas.DataFrame
a pandas dataframe with a geometry column
cols : string or list of string
name or list of names of columns to use to compute the statistic
w : pysal weights object
a weights object aligned with the dataframe. If not provided, this
is searched for in the dataframe's metadata
inplace : bool
a boolean denoting whether to operate on the dataframe inplace or to
return a series contaning the results of the computation. If
operating inplace, the derived columns will be named 'column_{nl}'
pvalue : string
a string denoting which pvalue should be returned. Refer to the
the {n} statistic's documentation for available p-values
outvals : list of strings
list of arbitrary attributes to return as columns from the
{n} statistic
**stat_kws : keyword arguments
options to pass to the underlying statistic. For this, see the
documentation for the {n} statistic.
Returns
--------
If inplace, None, and operation is conducted on dataframe in memory. Otherwise,
returns a copy of the dataframe with the relevant columns attached.
See Also
---------
For further documentation, refer to the {n} class in pysal.esda
"""
_bv_doc_template =\
"""
Function to compute a {n} statistic on a dataframe
Arguments
---------
df : pandas.DataFrame
a pandas dataframe with a geometry column
X : list of strings
column name or list of column names to use as X values to compute
the bivariate statistic. If no Y is provided, pairwise comparisons
among these variates are used instead.
Y : list of strings
column name or list of column names to use as Y values to compute
the bivariate statistic. if no Y is provided, pariwise comparisons
among the X variates are used instead.
w : pysal weights object
a weights object aligned with the dataframe. If not provided, this
is searched for in the dataframe's metadata
inplace : bool
a boolean denoting whether to operate on the dataframe inplace or to
return a series contaning the results of the computation. If
operating inplace, the derived columns will be named 'column_{nl}'
pvalue : string
a string denoting which pvalue should be returned. Refer to the
the {n} statistic's documentation for available p-values
outvals : list of strings
list of arbitrary attributes to return as columns from the
{n} statistic
**stat_kws : keyword arguments
options to pass to the underlying statistic. For this, see the
documentation for the {n} statistic.
Returns
--------
If inplace, None, and operation is conducted on dataframe in memory. Otherwise,
returns a copy of the dataframe with the relevant columns attached.
See Also
---------
For further documentation, refer to the {n} class in pysal.esda
"""
_rate_doc_template =\
"""
Function to compute a {n} statistic on a dataframe
Arguments
---------
df : pandas.DataFrame
a pandas dataframe with a geometry column
events : string or list of strings
one or more names where events are stored
populations : string or list of strings
one or more names where the populations corresponding to the
events are stored. If one population column is provided, it is
used for all event columns. If more than one population column
is provided but there is not a population for every event
column, an exception will be raised.
w : pysal weights object
a weights object aligned with the dataframe. If not provided, this
is searched for in the dataframe's metadata
inplace : bool
a boolean denoting whether to operate on the dataframe inplace or to
return a series contaning the results of the computation. If
operating inplace, the derived columns will be named 'column_{nl}'
pvalue : string
a string denoting which pvalue should be returned. Refer to the
the {n} statistic's documentation for available p-values
outvals : list of strings
list of arbitrary attributes to return as columns from the
{n} statistic
**stat_kws : keyword arguments
options to pass to the underlying statistic. For this, see the
documentation for the {n} statistic.
Returns
--------
If inplace, None, and operation is conducted on dataframe in memory. Otherwise,
returns a copy of the dataframe with the relevant columns attached.
See Also
---------
For further documentation, refer to the {n} class in pysal.esda
"""
|
pysal/esda | esda/tabular.py | _swap_ending | python | def _swap_ending(s, ending, delim='_'):
parts = [x for x in s.split(delim)[:-1] if x != '']
parts.append(ending)
return delim.join(parts) | Replace the ending of a string, delimited into an arbitrary
number of chunks by `delim`, with the ending provided
Parameters
----------
s : string
string to replace endings
ending : string
string used to replace ending of `s`
delim : string
string that splits s into one or more parts
Returns
-------
new string where the final chunk of `s`, delimited by `delim`, is replaced
with `ending`. | train | https://github.com/pysal/esda/blob/2fafc6ec505e153152a86601d3e0fba080610c20/esda/tabular.py#L156-L177 | null | #from ...common import requires as _requires
import itertools as _it
from libpysal.weights import W
# I would like to define it like this, so that you could make a call like:
# Geary(df, 'HOVAL', 'INC', w=W), but this only works in Python3. So, I have to
# use a workaround
#def _statistic(df, *cols, stat=None, w=None, inplace=True,
def _univariate_handler(df, cols, stat=None, w=None, inplace=True,
pvalue = 'sim', outvals = None, swapname='', **kwargs):
"""
Compute a univariate descriptive statistic `stat` over columns `cols` in
`df`.
Parameters
----------
df : pandas.DataFrame
the dataframe containing columns to compute the descriptive
statistics
cols : string or list of strings
one or more names of columns in `df` to use to compute
exploratory descriptive statistics.
stat : callable
a function that takes data as a first argument and any number
of configuration keyword arguments and returns an object
encapsulating the exploratory statistic results
w : pysal.weights.W
the spatial weights object corresponding to the dataframe
inplace : bool
a flag denoting whether to add the statistic to the dataframe
in memory, or to construct a copy of the dataframe and append
the results to the copy
pvalue : string
the name of the pvalue on the results object wanted
outvals : list of strings
names of attributes of the dataframe to attempt to flatten
into a column
swapname : string
suffix to replace generic identifier with. Each caller of this
function should set this to a unique column suffix
**kwargs : optional keyword arguments
options that are passed directly to the statistic
"""
### Preprocess
if not inplace:
new_df = df.copy()
_univariate_handler(new_df, cols, stat=stat, w=w, pvalue=pvalue,
inplace=True, outvals=outvals,
swapname=swapname, **kwargs)
return new_df
if w is None:
for name in df._metadata:
this_obj = df.__dict__.get(name)
if isinstance(this_obj, W):
w = this_obj
if w is None:
raise Exception('Weights not provided and no weights attached to frame!'
' Please provide a weight or attach a weight to the'
' dataframe')
### Prep indexes
if outvals is None:
outvals = []
outvals.insert(0,'_statistic')
if pvalue.lower() in ['all', 'both', '*']:
raise NotImplementedError("If you want more than one type of PValue,add"
" the targeted pvalue type to outvals. For example:"
" Geary(df, cols=['HOVAL'], w=w, outvals=['p_z_sim', "
"'p_rand']")
# this is nontrivial, since we
# can't know which p_value types are on the object without computing it.
# This is because we don't flag them with @properties, so they're just
# arbitrarily assigned post-facto. One solution might be to post-process the
# objects, determine which pvalue types are available, and then grab them
# all if needed.
if pvalue is not '':
outvals.append('p_'+pvalue.lower())
if isinstance(cols, str):
cols = [cols]
### Make closure around weights & apply columnwise
def column_stat(column):
return stat(column.values, w=w, **kwargs)
stat_objs = df[cols].apply(column_stat)
### Assign into dataframe
for col in cols:
stat_obj = stat_objs[col]
y = kwargs.get('y')
if y is not None:
col += '-' + y.name
outcols = ['_'.join((col, val)) for val in outvals]
for colname, attname in zip(outcols, outvals):
df[colname] = stat_obj.__getattribute__(attname)
if swapname is not '':
df.columns = [_swap_ending(col, swapname) if col.endswith('_statistic') else col
for col in df.columns]
def _bivariate_handler(df, x, y=None, w=None, inplace=True, pvalue='sim',
                       outvals=None, **kwargs):
    """
    Compute a descriptive bivariate statistic over two sets of columns, `x` and
    `y`, contained in `df`.

    Parameters
    ----------
    df : pandas.DataFrame
        dataframe in which columns `x` and `y` are contained
    x : string or list of strings
        one or more column names to use as variates in the bivariate
        statistics
    y : string or list of strings
        one or more column names to use as variates in the bivariate
        statistics
    w : pysal.weights.W
        spatial weights object corresponding to the dataframe `df`
    inplace : bool
        a flag denoting whether to add the statistic to the dataframe
        in memory, or to construct a copy of the dataframe and append
        the results to the copy
    pvalue : string
        the name of the pvalue on the results object wanted
    outvals : list of strings
        names of attributes of the dataframe to attempt to flatten
        into a column
    swapname : string
        suffix to replace generic identifier with. Each caller of this
        function should set this to a unique column suffix
    **kwargs : optional keyword arguments
        options that are passed directly to the statistic
    """
    # `swapname` rides along in **kwargs; pop it so it is not forwarded to
    # the per-pair univariate calls below (each of those gets swapname='').
    real_swapname = kwargs.pop('swapname', '')
    if isinstance(y, str):
        y = [y]
    if isinstance(x, str):
        x = [x]
    if not inplace:
        # Operate on a copy with inplace=True and hand the copy back.
        new_df = df.copy()
        _bivariate_handler(new_df, x, y=y, w=w, inplace=True,
                           swapname=real_swapname,
                           pvalue=pvalue, outvals=outvals, **kwargs)
        return new_df
    if y is None:
        # No y given: compute the statistic over all ordered pairs of x.
        y = x
    for xi, yi in _it.product(x, y):
        if xi == yi:
            # Self-pairs carry no bivariate information; skip them.
            continue
        _univariate_handler(df, cols=xi, w=w, y=df[yi], inplace=True,
                            pvalue=pvalue, outvals=outvals, swapname='', **kwargs)
    # BUG FIX: the original tested `real_swapname is not ''`, an identity
    # comparison against a str literal.  That only works via CPython string
    # interning, is not guaranteed, and raises SyntaxWarning on Python >= 3.8.
    # Compare by value instead.
    if real_swapname != '':
        df.columns = [_swap_ending(col, real_swapname)
                      if col.endswith('_statistic')
                      else col for col in df.columns]
##############
# DOCSTRINGS #
##############

# Shared docstring bodies for the dataframe accessor functions above.  Each
# template contains `{n}` / `{nl}` placeholders (statistic name and its
# lowercase form) -- presumably filled via str.format by the factory that
# attaches these docstrings; confirm at the usage site.

# Template for univariate statistics (a single set of columns).
_univ_doc_template =\
"""
Function to compute a {n} statistic on a dataframe
Arguments
---------
df : pandas.DataFrame
a pandas dataframe with a geometry column
cols : string or list of string
name or list of names of columns to use to compute the statistic
w : pysal weights object
a weights object aligned with the dataframe. If not provided, this
is searched for in the dataframe's metadata
inplace : bool
a boolean denoting whether to operate on the dataframe inplace or to
return a series contaning the results of the computation. If
operating inplace, the derived columns will be named 'column_{nl}'
pvalue : string
a string denoting which pvalue should be returned. Refer to the
the {n} statistic's documentation for available p-values
outvals : list of strings
list of arbitrary attributes to return as columns from the
{n} statistic
**stat_kws : keyword arguments
options to pass to the underlying statistic. For this, see the
documentation for the {n} statistic.
Returns
--------
If inplace, None, and operation is conducted on dataframe in memory. Otherwise,
returns a copy of the dataframe with the relevant columns attached.
See Also
---------
For further documentation, refer to the {n} class in pysal.esda
"""

# Template for bivariate statistics (X columns against Y columns).
_bv_doc_template =\
"""
Function to compute a {n} statistic on a dataframe
Arguments
---------
df : pandas.DataFrame
a pandas dataframe with a geometry column
X : list of strings
column name or list of column names to use as X values to compute
the bivariate statistic. If no Y is provided, pairwise comparisons
among these variates are used instead.
Y : list of strings
column name or list of column names to use as Y values to compute
the bivariate statistic. if no Y is provided, pariwise comparisons
among the X variates are used instead.
w : pysal weights object
a weights object aligned with the dataframe. If not provided, this
is searched for in the dataframe's metadata
inplace : bool
a boolean denoting whether to operate on the dataframe inplace or to
return a series contaning the results of the computation. If
operating inplace, the derived columns will be named 'column_{nl}'
pvalue : string
a string denoting which pvalue should be returned. Refer to the
the {n} statistic's documentation for available p-values
outvals : list of strings
list of arbitrary attributes to return as columns from the
{n} statistic
**stat_kws : keyword arguments
options to pass to the underlying statistic. For this, see the
documentation for the {n} statistic.
Returns
--------
If inplace, None, and operation is conducted on dataframe in memory. Otherwise,
returns a copy of the dataframe with the relevant columns attached.
See Also
---------
For further documentation, refer to the {n} class in pysal.esda
"""

# Template for rate statistics (event counts normalized by populations).
_rate_doc_template =\
"""
Function to compute a {n} statistic on a dataframe
Arguments
---------
df : pandas.DataFrame
a pandas dataframe with a geometry column
events : string or list of strings
one or more names where events are stored
populations : string or list of strings
one or more names where the populations corresponding to the
events are stored. If one population column is provided, it is
used for all event columns. If more than one population column
is provided but there is not a population for every event
column, an exception will be raised.
w : pysal weights object
a weights object aligned with the dataframe. If not provided, this
is searched for in the dataframe's metadata
inplace : bool
a boolean denoting whether to operate on the dataframe inplace or to
return a series contaning the results of the computation. If
operating inplace, the derived columns will be named 'column_{nl}'
pvalue : string
a string denoting which pvalue should be returned. Refer to the
the {n} statistic's documentation for available p-values
outvals : list of strings
list of arbitrary attributes to return as columns from the
{n} statistic
**stat_kws : keyword arguments
options to pass to the underlying statistic. For this, see the
documentation for the {n} statistic.
Returns
--------
If inplace, None, and operation is conducted on dataframe in memory. Otherwise,
returns a copy of the dataframe with the relevant columns attached.
See Also
---------
For further documentation, refer to the {n} class in pysal.esda
"""
|
jasonlaska/spherecluster | spherecluster/util.py | sample_vMF | python | def sample_vMF(mu, kappa, num_samples):
dim = len(mu)
result = np.zeros((num_samples, dim))
for nn in range(num_samples):
# sample offset from center (on sphere) with spread kappa
w = _sample_weight(kappa, dim)
# sample a point v on the unit sphere that's orthogonal to mu
v = _sample_orthonormal_to(mu)
# compute new point
result[nn, :] = v * np.sqrt(1. - w ** 2) + w * mu
return result | Generate num_samples N-dimensional samples from von Mises Fisher
distribution around center mu \in R^N with concentration kappa. | train | https://github.com/jasonlaska/spherecluster/blob/701b0b1909088a56e353b363b2672580d4fe9d93/spherecluster/util.py#L16-L32 | [
"def _sample_weight(kappa, dim):\n \"\"\"Rejection sampling scheme for sampling distance from center on\n surface of the sphere.\n \"\"\"\n dim = dim - 1 # since S^{n-1}\n b = dim / (np.sqrt(4. * kappa ** 2 + dim ** 2) + 2 * kappa)\n x = (1. - b) / (1. + b)\n c = kappa * x + dim * np.log(1 - x... | """
Generate multivariate von Mises Fisher samples.
This solution originally appears here:
http://stats.stackexchange.com/questions/156729/sampling-from-von-mises-fisher-distribution-in-python
Also see:
Sampling from vMF on S^2:
https://www.mitsuba-renderer.org/~wenzel/files/vmf.pdf
http://www.stat.pitt.edu/sungkyu/software/randvonMisesFisher3.pdf
"""
import numpy as np
def _sample_weight(kappa, dim):
"""Rejection sampling scheme for sampling distance from center on
surface of the sphere.
"""
dim = dim - 1 # since S^{n-1}
b = dim / (np.sqrt(4. * kappa ** 2 + dim ** 2) + 2 * kappa)
x = (1. - b) / (1. + b)
c = kappa * x + dim * np.log(1 - x ** 2)
while True:
z = np.random.beta(dim / 2., dim / 2.)
w = (1. - (1. + b) * z) / (1. - (1. - b) * z)
u = np.random.uniform(low=0, high=1)
if kappa * w + dim * np.log(1. - x * w) - c >= np.log(u):
return w
def _sample_orthonormal_to(mu):
"""Sample point on sphere orthogonal to mu."""
v = np.random.randn(mu.shape[0])
proj_mu_v = mu * np.dot(mu, v) / np.linalg.norm(mu)
orthto = v - proj_mu_v
return orthto / np.linalg.norm(orthto)
|
jasonlaska/spherecluster | spherecluster/util.py | _sample_weight | python | def _sample_weight(kappa, dim):
dim = dim - 1 # since S^{n-1}
b = dim / (np.sqrt(4. * kappa ** 2 + dim ** 2) + 2 * kappa)
x = (1. - b) / (1. + b)
c = kappa * x + dim * np.log(1 - x ** 2)
while True:
z = np.random.beta(dim / 2., dim / 2.)
w = (1. - (1. + b) * z) / (1. - (1. - b) * z)
u = np.random.uniform(low=0, high=1)
if kappa * w + dim * np.log(1. - x * w) - c >= np.log(u):
return w | Rejection sampling scheme for sampling distance from center on
surface of the sphere. | train | https://github.com/jasonlaska/spherecluster/blob/701b0b1909088a56e353b363b2672580d4fe9d93/spherecluster/util.py#L35-L49 | null | """
Generate multivariate von Mises Fisher samples.
This solution originally appears here:
http://stats.stackexchange.com/questions/156729/sampling-from-von-mises-fisher-distribution-in-python
Also see:
Sampling from vMF on S^2:
https://www.mitsuba-renderer.org/~wenzel/files/vmf.pdf
http://www.stat.pitt.edu/sungkyu/software/randvonMisesFisher3.pdf
"""
import numpy as np
def sample_vMF(mu, kappa, num_samples):
    r"""Draw ``num_samples`` points from a von Mises-Fisher distribution.

    The distribution is centered at ``mu`` \in R^N with concentration
    ``kappa``.  Each draw combines a radial weight ``w`` along ``mu``
    with a random tangential direction ``v`` orthogonal to ``mu``:
    ``v * sqrt(1 - w**2) + w * mu`` -- a unit vector whenever ``mu`` is
    unit-norm.

    Returns an array of shape (num_samples, len(mu)), one sample per row.
    """
    dim = len(mu)
    samples = np.zeros((num_samples, dim))
    for row in range(num_samples):
        # radial component: how strongly this draw leans toward mu
        w = _sample_weight(kappa, dim)
        # tangential component: direction on the sphere orthogonal to mu
        v = _sample_orthonormal_to(mu)
        samples[row, :] = v * np.sqrt(1. - w ** 2) + w * mu
    return samples
def _sample_orthonormal_to(mu):
    """Sample point on sphere orthogonal to mu.

    NOTE(review): the projection divides by ||mu|| once rather than
    ||mu||**2, so the result is only truly orthogonal when mu is
    unit-norm (as vMF centers are) -- confirm before reusing elsewhere.
    """
    v = np.random.randn(mu.shape[0])
    # remove the component of v along mu, then renormalize the remainder
    proj_mu_v = mu * np.dot(mu, v) / np.linalg.norm(mu)
    orthto = v - proj_mu_v
    return orthto / np.linalg.norm(orthto)
|
jasonlaska/spherecluster | spherecluster/util.py | _sample_orthonormal_to | python | def _sample_orthonormal_to(mu):
v = np.random.randn(mu.shape[0])
proj_mu_v = mu * np.dot(mu, v) / np.linalg.norm(mu)
orthto = v - proj_mu_v
return orthto / np.linalg.norm(orthto) | Sample point on sphere orthogonal to mu. | train | https://github.com/jasonlaska/spherecluster/blob/701b0b1909088a56e353b363b2672580d4fe9d93/spherecluster/util.py#L52-L57 | null | """
Generate multivariate von Mises Fisher samples.
This solution originally appears here:
http://stats.stackexchange.com/questions/156729/sampling-from-von-mises-fisher-distribution-in-python
Also see:
Sampling from vMF on S^2:
https://www.mitsuba-renderer.org/~wenzel/files/vmf.pdf
http://www.stat.pitt.edu/sungkyu/software/randvonMisesFisher3.pdf
"""
import numpy as np
def sample_vMF(mu, kappa, num_samples):
    r"""Generate num_samples N-dimensional samples from von Mises Fisher
    distribution around center mu \in R^N with concentration kappa.

    Returns an array of shape (num_samples, len(mu)); each row is a
    unit vector whenever ``mu`` is unit-norm.
    """
    dim = len(mu)
    result = np.zeros((num_samples, dim))
    for nn in range(num_samples):
        # sample offset from center (on sphere) with spread kappa
        w = _sample_weight(kappa, dim)
        # sample a point v on the unit sphere that's orthogonal to mu
        v = _sample_orthonormal_to(mu)
        # compute new point: tangential part plus radial part (w * mu)
        result[nn, :] = v * np.sqrt(1. - w ** 2) + w * mu
    return result
def _sample_weight(kappa, dim):
    """Rejection sampling scheme for sampling distance from center on
    surface of the sphere.

    Returns the scalar component ``w`` of a vMF sample along the mean
    direction.  Each rejection-loop iteration consumes one Beta and one
    Uniform variate from the global numpy RNG.
    """
    dim = dim - 1  # since S^{n-1}: sampling happens one dimension down
    b = dim / (np.sqrt(4. * kappa ** 2 + dim ** 2) + 2 * kappa)
    x = (1. - b) / (1. + b)
    c = kappa * x + dim * np.log(1 - x ** 2)  # constant term of the test
    while True:
        z = np.random.beta(dim / 2., dim / 2.)
        w = (1. - (1. + b) * z) / (1. - (1. - b) * z)
        u = np.random.uniform(low=0, high=1)
        # accept when the log-acceptance ratio dominates log(u)
        if kappa * w + dim * np.log(1. - x * w) - c >= np.log(u):
            return w
|
jasonlaska/spherecluster | spherecluster/spherical_kmeans.py | _spherical_kmeans_single_lloyd | python | def _spherical_kmeans_single_lloyd(
X,
n_clusters,
sample_weight=None,
max_iter=300,
init="k-means++",
verbose=False,
x_squared_norms=None,
random_state=None,
tol=1e-4,
precompute_distances=True,
):
random_state = check_random_state(random_state)
sample_weight = _check_sample_weight(X, sample_weight)
best_labels, best_inertia, best_centers = None, None, None
# init
centers = _init_centroids(
X, n_clusters, init, random_state=random_state, x_squared_norms=x_squared_norms
)
if verbose:
print("Initialization complete")
# Allocate memory to store the distances for each sample to its
# closer center for reallocation in case of ties
distances = np.zeros(shape=(X.shape[0],), dtype=X.dtype)
# iterations
for i in range(max_iter):
centers_old = centers.copy()
# labels assignment
# TODO: _labels_inertia should be done with cosine distance
# since ||a - b|| = 2(1 - cos(a,b)) when a,b are unit normalized
# this doesn't really matter.
labels, inertia = _labels_inertia(
X,
sample_weight,
x_squared_norms,
centers,
precompute_distances=precompute_distances,
distances=distances,
)
# computation of the means
if sp.issparse(X):
centers = _k_means._centers_sparse(
X, sample_weight, labels, n_clusters, distances
)
else:
centers = _k_means._centers_dense(
X, sample_weight, labels, n_clusters, distances
)
# l2-normalize centers (this is the main contibution here)
centers = normalize(centers)
if verbose:
print("Iteration %2d, inertia %.3f" % (i, inertia))
if best_inertia is None or inertia < best_inertia:
best_labels = labels.copy()
best_centers = centers.copy()
best_inertia = inertia
center_shift_total = squared_norm(centers_old - centers)
if center_shift_total <= tol:
if verbose:
print(
"Converged at iteration %d: "
"center shift %e within tolerance %e" % (i, center_shift_total, tol)
)
break
if center_shift_total > 0:
# rerun E-step in case of non-convergence so that predicted labels
# match cluster centers
best_labels, best_inertia = _labels_inertia(
X,
sample_weight,
x_squared_norms,
best_centers,
precompute_distances=precompute_distances,
distances=distances,
)
return best_labels, best_inertia, best_centers, i + 1 | Modified from sklearn.cluster.k_means_.k_means_single_lloyd. | train | https://github.com/jasonlaska/spherecluster/blob/701b0b1909088a56e353b363b2672580d4fe9d93/spherecluster/spherical_kmeans.py#L22-L113 | null | import warnings
import numpy as np
import scipy.sparse as sp
from sklearn.cluster import KMeans
from sklearn.cluster.k_means_ import (
_check_sample_weight,
_init_centroids,
_labels_inertia,
_tolerance,
_validate_center_shape,
)
from sklearn.utils import check_array, check_random_state
from sklearn.utils.validation import _num_samples
from sklearn.cluster import _k_means
from sklearn.preprocessing import normalize
from sklearn.externals.joblib import Parallel, delayed
from sklearn.utils.extmath import row_norms, squared_norm
def spherical_k_means(
    X,
    n_clusters,
    sample_weight=None,
    init="k-means++",
    n_init=10,
    max_iter=300,
    verbose=False,
    tol=1e-4,
    random_state=None,
    copy_x=True,
    n_jobs=1,
    algorithm="auto",
    return_n_iter=False,
):
    """Run spherical k-means, keeping the best of ``n_init`` restarts.

    Modified from sklearn.cluster.k_means_.k_means: each restart is
    solved by ``_spherical_kmeans_single_lloyd``, which l2-normalizes
    the centers after every update.

    Notes
    -----
    * ``algorithm`` is accepted for signature parity with sklearn's
      ``k_means`` but is never read in this function.
    * Returns ``(best_centers, best_labels, best_inertia)``, plus the
      winning run's iteration count when ``return_n_iter`` is True.
    """
    if n_init <= 0:
        raise ValueError(
            "Invalid number of initializations."
            " n_init=%d must be bigger than zero." % n_init
        )
    random_state = check_random_state(random_state)

    if max_iter <= 0:
        raise ValueError(
            "Number of iterations should be a positive number,"
            " got %d instead" % max_iter
        )

    # Start at +inf so the first completed run always becomes the best.
    best_inertia = np.infty
    # avoid forcing order when copy_x=False
    order = "C" if copy_x else None
    X = check_array(
        X, accept_sparse="csr", dtype=[np.float64, np.float32], order=order, copy=copy_x
    )
    # verify that the number of samples given is larger than k
    if _num_samples(X) < n_clusters:
        raise ValueError(
            "n_samples=%d should be >= n_clusters=%d" % (_num_samples(X), n_clusters)
        )
    tol = _tolerance(X, tol)

    # An explicit ndarray of initial centers: validate it and collapse to
    # a single init, since restarts would all start from the same point.
    if hasattr(init, "__array__"):
        init = check_array(init, dtype=X.dtype.type, order="C", copy=True)
        _validate_center_shape(X, n_clusters, init)

        if n_init != 1:
            warnings.warn(
                "Explicit initial center position passed: "
                "performing only one init in k-means instead of n_init=%d" % n_init,
                RuntimeWarning,
                stacklevel=2,
            )
            n_init = 1

    # precompute squared norms of data points
    x_squared_norms = row_norms(X, squared=True)

    if n_jobs == 1:
        # For a single thread, less memory is needed if we just store one set
        # of the best results (as opposed to one set per run per thread).
        for it in range(n_init):
            # run a k-means once
            labels, inertia, centers, n_iter_ = _spherical_kmeans_single_lloyd(
                X,
                n_clusters,
                sample_weight,
                max_iter=max_iter,
                init=init,
                verbose=verbose,
                tol=tol,
                x_squared_norms=x_squared_norms,
                random_state=random_state,
            )

            # determine if these results are the best so far
            if best_inertia is None or inertia < best_inertia:
                best_labels = labels.copy()
                best_centers = centers.copy()
                best_inertia = inertia
                best_n_iter = n_iter_
    else:
        # parallelisation of k-means runs
        seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init)
        results = Parallel(n_jobs=n_jobs, verbose=0)(
            delayed(_spherical_kmeans_single_lloyd)(
                X,
                n_clusters,
                sample_weight,
                max_iter=max_iter,
                init=init,
                verbose=verbose,
                tol=tol,
                x_squared_norms=x_squared_norms,
                # Change seed to ensure variety
                random_state=seed,
            )
            for seed in seeds
        )

        # Get results with the lowest inertia
        labels, inertia, centers, n_iters = zip(*results)
        best = np.argmin(inertia)
        best_labels = labels[best]
        best_inertia = inertia[best]
        best_centers = centers[best]
        best_n_iter = n_iters[best]

    if return_n_iter:
        return best_centers, best_labels, best_inertia, best_n_iter
    else:
        return best_centers, best_labels, best_inertia
class SphericalKMeans(KMeans):
    """Spherical K-Means clustering.

    Modification of sklearn.cluster.KMeans where cluster centers are
    normalized (projected onto the sphere) in each iteration.

    Parameters
    ----------
    n_clusters : int, optional, default: 8
        The number of clusters to form as well as the number of
        centroids to generate.

    max_iter : int, default: 300
        Maximum number of iterations of the k-means algorithm for a
        single run.

    n_init : int, default: 10
        Number of time the k-means algorithm will be run with different
        centroid seeds. The final results will be the best output of
        n_init consecutive runs in terms of inertia.

    init : {'k-means++', 'random' or an ndarray}
        Method for initialization, defaults to 'k-means++':

        'k-means++' : selects initial cluster centers for k-mean
        clustering in a smart way to speed up convergence. See section
        Notes in k_init for more details.

        'random': choose k observations (rows) at random from data for
        the initial centroids.

        If an ndarray is passed, it should be of shape (n_clusters, n_features)
        and gives the initial centers.

    tol : float, default: 1e-4
        Relative tolerance with regards to inertia to declare convergence

    n_jobs : int
        The number of jobs to use for the computation. This works by computing
        each of the n_init runs in parallel.

        If -1 all CPUs are used. If 1 is given, no parallel computing code is
        used at all, which is useful for debugging. For n_jobs below -1,
        (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
        are used.

    random_state : integer or numpy.RandomState, optional
        The generator used to initialize the centers. If an integer is
        given, it fixes the seed. Defaults to the global numpy random
        number generator.

    verbose : int, default 0
        Verbosity mode.

    copy_x : boolean, default True
        When pre-computing distances it is more numerically accurate to center
        the data first. If copy_x is True, then the original data is not
        modified. If False, the original data is modified, and put back before
        the function returns, but small numerical differences may be introduced
        by subtracting and then adding the data mean.

    normalize : boolean, default True
        Normalize the input to have unit norm.

    Attributes
    ----------
    cluster_centers_ : array, [n_clusters, n_features]
        Coordinates of cluster centers

    labels_ :
        Labels of each point

    inertia_ : float
        Sum of distances of samples to their closest cluster center.
    """

    def __init__(
        self,
        n_clusters=8,
        init="k-means++",
        n_init=10,
        max_iter=300,
        tol=1e-4,
        n_jobs=1,
        verbose=0,
        random_state=None,
        copy_x=True,
        normalize=True,
    ):
        # Attribute assignment only, per the sklearn estimator contract
        # (no validation or computation in __init__).
        self.n_clusters = n_clusters
        self.init = init
        self.max_iter = max_iter
        self.tol = tol
        self.n_init = n_init
        self.verbose = verbose
        self.random_state = random_state
        self.copy_x = copy_x
        self.n_jobs = n_jobs
        self.normalize = normalize

    def fit(self, X, y=None, sample_weight=None):
        """Compute k-means clustering.

        Parameters
        ----------
        X : array-like or sparse matrix, shape=(n_samples, n_features)

        y : Ignored
            not used, present here for API consistency by convention.

        sample_weight : array-like, shape (n_samples,), optional
            The weights for each observation in X. If None, all observations
            are assigned equal weight (default: None)

        Returns
        -------
        self : the fitted estimator (sets ``cluster_centers_``,
            ``labels_``, ``inertia_`` and ``n_iter_``).
        """
        if self.normalize:
            # This is sklearn.preprocessing.normalize (module level), not
            # the boolean self.normalize flag tested just above.
            X = normalize(X)

        random_state = check_random_state(self.random_state)

        # TODO: add check that all data is unit-normalized

        self.cluster_centers_, self.labels_, self.inertia_, self.n_iter_ = spherical_k_means(
            X,
            n_clusters=self.n_clusters,
            sample_weight=sample_weight,
            init=self.init,
            n_init=self.n_init,
            max_iter=self.max_iter,
            verbose=self.verbose,
            tol=self.tol,
            random_state=random_state,
            copy_x=self.copy_x,
            n_jobs=self.n_jobs,
            return_n_iter=True,
        )

        return self
|
jasonlaska/spherecluster | spherecluster/spherical_kmeans.py | spherical_k_means | python | def spherical_k_means(
X,
n_clusters,
sample_weight=None,
init="k-means++",
n_init=10,
max_iter=300,
verbose=False,
tol=1e-4,
random_state=None,
copy_x=True,
n_jobs=1,
algorithm="auto",
return_n_iter=False,
):
if n_init <= 0:
raise ValueError(
"Invalid number of initializations."
" n_init=%d must be bigger than zero." % n_init
)
random_state = check_random_state(random_state)
if max_iter <= 0:
raise ValueError(
"Number of iterations should be a positive number,"
" got %d instead" % max_iter
)
best_inertia = np.infty
# avoid forcing order when copy_x=False
order = "C" if copy_x else None
X = check_array(
X, accept_sparse="csr", dtype=[np.float64, np.float32], order=order, copy=copy_x
)
# verify that the number of samples given is larger than k
if _num_samples(X) < n_clusters:
raise ValueError(
"n_samples=%d should be >= n_clusters=%d" % (_num_samples(X), n_clusters)
)
tol = _tolerance(X, tol)
if hasattr(init, "__array__"):
init = check_array(init, dtype=X.dtype.type, order="C", copy=True)
_validate_center_shape(X, n_clusters, init)
if n_init != 1:
warnings.warn(
"Explicit initial center position passed: "
"performing only one init in k-means instead of n_init=%d" % n_init,
RuntimeWarning,
stacklevel=2,
)
n_init = 1
# precompute squared norms of data points
x_squared_norms = row_norms(X, squared=True)
if n_jobs == 1:
# For a single thread, less memory is needed if we just store one set
# of the best results (as opposed to one set per run per thread).
for it in range(n_init):
# run a k-means once
labels, inertia, centers, n_iter_ = _spherical_kmeans_single_lloyd(
X,
n_clusters,
sample_weight,
max_iter=max_iter,
init=init,
verbose=verbose,
tol=tol,
x_squared_norms=x_squared_norms,
random_state=random_state,
)
# determine if these results are the best so far
if best_inertia is None or inertia < best_inertia:
best_labels = labels.copy()
best_centers = centers.copy()
best_inertia = inertia
best_n_iter = n_iter_
else:
# parallelisation of k-means runs
seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init)
results = Parallel(n_jobs=n_jobs, verbose=0)(
delayed(_spherical_kmeans_single_lloyd)(
X,
n_clusters,
sample_weight,
max_iter=max_iter,
init=init,
verbose=verbose,
tol=tol,
x_squared_norms=x_squared_norms,
# Change seed to ensure variety
random_state=seed,
)
for seed in seeds
)
# Get results with the lowest inertia
labels, inertia, centers, n_iters = zip(*results)
best = np.argmin(inertia)
best_labels = labels[best]
best_inertia = inertia[best]
best_centers = centers[best]
best_n_iter = n_iters[best]
if return_n_iter:
return best_centers, best_labels, best_inertia, best_n_iter
else:
return best_centers, best_labels, best_inertia | Modified from sklearn.cluster.k_means_.k_means. | train | https://github.com/jasonlaska/spherecluster/blob/701b0b1909088a56e353b363b2672580d4fe9d93/spherecluster/spherical_kmeans.py#L116-L228 | [
"def _spherical_kmeans_single_lloyd(\n X,\n n_clusters,\n sample_weight=None,\n max_iter=300,\n init=\"k-means++\",\n verbose=False,\n x_squared_norms=None,\n random_state=None,\n tol=1e-4,\n precompute_distances=True,\n):\n \"\"\"\n Modified from sklearn.cluster.k_means_.k_means... | import warnings
import numpy as np
import scipy.sparse as sp
from sklearn.cluster import KMeans
from sklearn.cluster.k_means_ import (
_check_sample_weight,
_init_centroids,
_labels_inertia,
_tolerance,
_validate_center_shape,
)
from sklearn.utils import check_array, check_random_state
from sklearn.utils.validation import _num_samples
from sklearn.cluster import _k_means
from sklearn.preprocessing import normalize
from sklearn.externals.joblib import Parallel, delayed
from sklearn.utils.extmath import row_norms, squared_norm
def _spherical_kmeans_single_lloyd(
    X,
    n_clusters,
    sample_weight=None,
    max_iter=300,
    init="k-means++",
    verbose=False,
    x_squared_norms=None,
    random_state=None,
    tol=1e-4,
    precompute_distances=True,
):
    """
    Single Lloyd run of spherical k-means.

    Modified from sklearn.cluster.k_means_.k_means_single_lloyd: the same
    E-step / M-step structure, except centers are l2-normalized after
    every M-step so they stay on the unit sphere.

    Returns ``(best_labels, best_inertia, best_centers, n_iter)``.
    """
    random_state = check_random_state(random_state)

    sample_weight = _check_sample_weight(X, sample_weight)

    best_labels, best_inertia, best_centers = None, None, None

    # init
    centers = _init_centroids(
        X, n_clusters, init, random_state=random_state, x_squared_norms=x_squared_norms
    )
    if verbose:
        print("Initialization complete")

    # Allocate memory to store the distances for each sample to its
    # closer center for reallocation in case of ties
    distances = np.zeros(shape=(X.shape[0],), dtype=X.dtype)

    # iterations
    for i in range(max_iter):
        centers_old = centers.copy()

        # labels assignment (E-step)
        # TODO: _labels_inertia should be done with cosine distance
        # since ||a - b|| = 2(1 - cos(a,b)) when a,b are unit normalized
        # this doesn't really matter.
        labels, inertia = _labels_inertia(
            X,
            sample_weight,
            x_squared_norms,
            centers,
            precompute_distances=precompute_distances,
            distances=distances,
        )

        # computation of the means (M-step)
        if sp.issparse(X):
            centers = _k_means._centers_sparse(
                X, sample_weight, labels, n_clusters, distances
            )
        else:
            centers = _k_means._centers_dense(
                X, sample_weight, labels, n_clusters, distances
            )

        # l2-normalize centers (this is the main contribution here)
        centers = normalize(centers)

        if verbose:
            print("Iteration %2d, inertia %.3f" % (i, inertia))

        if best_inertia is None or inertia < best_inertia:
            best_labels = labels.copy()
            best_centers = centers.copy()
            best_inertia = inertia

        # stop once the centers have (nearly) stopped moving
        center_shift_total = squared_norm(centers_old - centers)
        if center_shift_total <= tol:
            if verbose:
                print(
                    "Converged at iteration %d: "
                    "center shift %e within tolerance %e" % (i, center_shift_total, tol)
                )
            break

    if center_shift_total > 0:
        # rerun E-step in case of non-convergence so that predicted labels
        # match cluster centers
        best_labels, best_inertia = _labels_inertia(
            X,
            sample_weight,
            x_squared_norms,
            best_centers,
            precompute_distances=precompute_distances,
            distances=distances,
        )

    return best_labels, best_inertia, best_centers, i + 1
class SphericalKMeans(KMeans):
    """Spherical K-Means clustering.

    Modification of sklearn.cluster.KMeans where cluster centers are
    normalized (projected onto the sphere) in each iteration.

    Parameters
    ----------
    n_clusters : int, optional, default: 8
        The number of clusters to form as well as the number of
        centroids to generate.

    max_iter : int, default: 300
        Maximum number of iterations of the k-means algorithm for a
        single run.

    n_init : int, default: 10
        Number of time the k-means algorithm will be run with different
        centroid seeds. The final results will be the best output of
        n_init consecutive runs in terms of inertia.

    init : {'k-means++', 'random' or an ndarray}
        Method for initialization, defaults to 'k-means++':

        'k-means++' : selects initial cluster centers for k-mean
        clustering in a smart way to speed up convergence. See section
        Notes in k_init for more details.

        'random': choose k observations (rows) at random from data for
        the initial centroids.

        If an ndarray is passed, it should be of shape (n_clusters, n_features)
        and gives the initial centers.

    tol : float, default: 1e-4
        Relative tolerance with regards to inertia to declare convergence

    n_jobs : int
        The number of jobs to use for the computation. This works by computing
        each of the n_init runs in parallel.

        If -1 all CPUs are used. If 1 is given, no parallel computing code is
        used at all, which is useful for debugging. For n_jobs below -1,
        (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
        are used.

    random_state : integer or numpy.RandomState, optional
        The generator used to initialize the centers. If an integer is
        given, it fixes the seed. Defaults to the global numpy random
        number generator.

    verbose : int, default 0
        Verbosity mode.

    copy_x : boolean, default True
        When pre-computing distances it is more numerically accurate to center
        the data first. If copy_x is True, then the original data is not
        modified. If False, the original data is modified, and put back before
        the function returns, but small numerical differences may be introduced
        by subtracting and then adding the data mean.

    normalize : boolean, default True
        Normalize the input to have unit norm.

    Attributes
    ----------
    cluster_centers_ : array, [n_clusters, n_features]
        Coordinates of cluster centers

    labels_ :
        Labels of each point

    inertia_ : float
        Sum of distances of samples to their closest cluster center.
    """

    def __init__(
        self,
        n_clusters=8,
        init="k-means++",
        n_init=10,
        max_iter=300,
        tol=1e-4,
        n_jobs=1,
        verbose=0,
        random_state=None,
        copy_x=True,
        normalize=True,
    ):
        # Attribute assignment only, per the sklearn estimator contract
        # (no validation or computation in __init__).
        self.n_clusters = n_clusters
        self.init = init
        self.max_iter = max_iter
        self.tol = tol
        self.n_init = n_init
        self.verbose = verbose
        self.random_state = random_state
        self.copy_x = copy_x
        self.n_jobs = n_jobs
        self.normalize = normalize

    def fit(self, X, y=None, sample_weight=None):
        """Compute k-means clustering.

        Parameters
        ----------
        X : array-like or sparse matrix, shape=(n_samples, n_features)

        y : Ignored
            not used, present here for API consistency by convention.

        sample_weight : array-like, shape (n_samples,), optional
            The weights for each observation in X. If None, all observations
            are assigned equal weight (default: None)

        Returns
        -------
        self : the fitted estimator (sets ``cluster_centers_``,
            ``labels_``, ``inertia_`` and ``n_iter_``).
        """
        if self.normalize:
            # This is sklearn.preprocessing.normalize (module level), not
            # the boolean self.normalize flag tested just above.
            X = normalize(X)

        random_state = check_random_state(self.random_state)

        # TODO: add check that all data is unit-normalized

        self.cluster_centers_, self.labels_, self.inertia_, self.n_iter_ = spherical_k_means(
            X,
            n_clusters=self.n_clusters,
            sample_weight=sample_weight,
            init=self.init,
            n_init=self.n_init,
            max_iter=self.max_iter,
            verbose=self.verbose,
            tol=self.tol,
            random_state=random_state,
            copy_x=self.copy_x,
            n_jobs=self.n_jobs,
            return_n_iter=True,
        )

        return self
|
jasonlaska/spherecluster | spherecluster/spherical_kmeans.py | SphericalKMeans.fit | python | def fit(self, X, y=None, sample_weight=None):
if self.normalize:
X = normalize(X)
random_state = check_random_state(self.random_state)
# TODO: add check that all data is unit-normalized
self.cluster_centers_, self.labels_, self.inertia_, self.n_iter_ = spherical_k_means(
X,
n_clusters=self.n_clusters,
sample_weight=sample_weight,
init=self.init,
n_init=self.n_init,
max_iter=self.max_iter,
verbose=self.verbose,
tol=self.tol,
random_state=random_state,
copy_x=self.copy_x,
n_jobs=self.n_jobs,
return_n_iter=True,
)
return self | Compute k-means clustering.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
y : Ignored
not used, present here for API consistency by convention.
sample_weight : array-like, shape (n_samples,), optional
The weights for each observation in X. If None, all observations
are assigned equal weight (default: None) | train | https://github.com/jasonlaska/spherecluster/blob/701b0b1909088a56e353b363b2672580d4fe9d93/spherecluster/spherical_kmeans.py#L329-L366 | [
"def spherical_k_means(\n X,\n n_clusters,\n sample_weight=None,\n init=\"k-means++\",\n n_init=10,\n max_iter=300,\n verbose=False,\n tol=1e-4,\n random_state=None,\n copy_x=True,\n n_jobs=1,\n algorithm=\"auto\",\n return_n_iter=False,\n):\n \"\"\"Modified from sklearn.cl... | class SphericalKMeans(KMeans):
"""Spherical K-Means clustering
Modfication of sklearn.cluster.KMeans where cluster centers are normalized
(projected onto the sphere) in each iteration.
Parameters
----------
n_clusters : int, optional, default: 8
The number of clusters to form as well as the number of
centroids to generate.
max_iter : int, default: 300
Maximum number of iterations of the k-means algorithm for a
single run.
n_init : int, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
init : {'k-means++', 'random' or an ndarray}
Method for initialization, defaults to 'k-means++':
'k-means++' : selects initial cluster centers for k-mean
clustering in a smart way to speed up convergence. See section
Notes in k_init for more details.
'random': choose k observations (rows) at random from data for
the initial centroids.
If an ndarray is passed, it should be of shape (n_clusters, n_features)
and gives the initial centers.
tol : float, default: 1e-4
Relative tolerance with regards to inertia to declare convergence
n_jobs : int
The number of jobs to use for the computation. This works by computing
each of the n_init runs in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
verbose : int, default 0
Verbosity mode.
copy_x : boolean, default True
When pre-computing distances it is more numerically accurate to center
the data first. If copy_x is True, then the original data is not
modified. If False, the original data is modified, and put back before
the function returns, but small numerical differences may be introduced
by subtracting and then adding the data mean.
normalize : boolean, default True
Normalize the input to have unnit norm.
Attributes
----------
cluster_centers_ : array, [n_clusters, n_features]
Coordinates of cluster centers
labels_ :
Labels of each point
inertia_ : float
Sum of distances of samples to their closest cluster center.
"""
    def __init__(
        self,
        n_clusters=8,
        init="k-means++",
        n_init=10,
        max_iter=300,
        tol=1e-4,
        n_jobs=1,
        verbose=0,
        random_state=None,
        copy_x=True,
        normalize=True,
    ):
        """Record clustering hyperparameters verbatim.

        NOTE(review): the enclosing class header lies outside this chunk;
        from the preceding docstring this looks like a spherical k-means
        style estimator -- confirm against the full file.
        """
        # sklearn convention: __init__ only stores parameters unchanged;
        # validation/conversion happens in fit().
        self.n_clusters = n_clusters
        self.init = init
        self.max_iter = max_iter
        self.tol = tol
        self.n_init = n_init
        self.verbose = verbose
        self.random_state = random_state
        self.copy_x = copy_x
        self.n_jobs = n_jobs
        self.normalize = normalize
|
jasonlaska/spherecluster | spherecluster/von_mises_fisher_mixture.py | _inertia_from_labels | python | def _inertia_from_labels(X, centers, labels):
n_examples, n_features = X.shape
inertia = np.zeros((n_examples,))
for ee in range(n_examples):
inertia[ee] = 1 - X[ee, :].dot(centers[int(labels[ee]), :].T)
return np.sum(inertia) | Compute inertia with cosine distance using known labels. | train | https://github.com/jasonlaska/spherecluster/blob/701b0b1909088a56e353b363b2672580d4fe9d93/spherecluster/von_mises_fisher_mixture.py#L25-L33 | null | import warnings
import numpy as np
import scipy.sparse as sp
from scipy.special import iv # modified Bessel function of first kind, I_v
from numpy import i0 # modified Bessel function of first kind order 0, I_0
from scipy.special import logsumexp
from sklearn.base import BaseEstimator, ClusterMixin, TransformerMixin
from sklearn.cluster.k_means_ import _init_centroids, _tolerance, _validate_center_shape
from sklearn.utils.validation import FLOAT_DTYPES
from sklearn.utils.validation import check_is_fitted
from sklearn.utils import check_array, check_random_state, as_float_array
from sklearn.preprocessing import normalize
from sklearn.utils.extmath import squared_norm
from sklearn.metrics.pairwise import cosine_distances
from sklearn.externals.joblib import Parallel, delayed
from . import spherical_kmeans
MAX_CONTENTRATION = 1e10
def _labels_inertia(X, centers):
"""Compute labels and inertia with cosine distance.
"""
n_examples, n_features = X.shape
n_clusters, n_features = centers.shape
labels = np.zeros((n_examples,))
inertia = np.zeros((n_examples,))
for ee in range(n_examples):
dists = np.zeros((n_clusters,))
for cc in range(n_clusters):
dists[cc] = 1 - X[ee, :].dot(centers[cc, :].T)
labels[ee] = np.argmin(dists)
inertia[ee] = dists[int(labels[ee])]
return labels, np.sum(inertia)
def _vmf_log(X, kappa, mu):
    """Compute log(vMF(X; kappa, mu)) using numpy/scipy Bessel routines.

    Only reliable for small kappa and low dimension; large values should go
    through _vmf_log_asymptotic instead.

    Parameters
    ----------
    X : array, shape (n_examples, n_features) -- unit-norm examples
    kappa : float -- concentration parameter
    mu : array, shape (n_features,) -- unit-norm mean direction
    """
    n_examples, n_features = X.shape
    # Work in log space: log(c * exp(k * x.mu)) == log(c) + k * x.mu.
    # The original exponentiated first, which overflows for moderately
    # large kappa even when the log-density itself is representable.
    return np.log(_vmf_normalize(kappa, n_features)) + kappa * X.dot(mu).T
def _vmf_normalize(kappa, dim):
"""Compute normalization constant using built-in numpy/scipy Bessel
approximations.
Works well on small kappa and mu.
"""
num = np.power(kappa, dim / 2. - 1.)
if dim / 2. - 1. < 1e-15:
denom = np.power(2. * np.pi, dim / 2.) * i0(kappa)
else:
denom = np.power(2. * np.pi, dim / 2.) * iv(dim / 2. - 1., kappa)
if np.isinf(num):
raise ValueError("VMF scaling numerator was inf.")
if np.isinf(denom):
raise ValueError("VMF scaling denominator was inf.")
if np.abs(denom) < 1e-15:
raise ValueError("VMF scaling denominator was 0.")
return num / denom
def _log_H_asymptotic(nu, kappa):
    """Amos-type upper-bound asymptotic approximation of log(H_nu)(kappa),
    where log(H_nu)(kappa) = integral_0^kappa R_nu(t) dt.

    See "lH_asymptotic <-" in movMF.R and the utility-function notes from
    https://cran.r-project.org/web/packages/movMF/index.html
    """
    beta = np.sqrt((nu + 0.5) ** 2)
    # Below this cutoff a tighter low-kappa bound is blended in.
    cutoff = np.sqrt((3. * nu + 11. / 2.) * (nu + 3. / 2.))
    kappa_low = np.min([kappa, cutoff])
    correction = _S(kappa_low, nu, nu + 2.) - _S(kappa_low, nu + 0.5, beta)
    return _S(kappa, nu + 0.5, beta) + correction
def _S(kappa, alpha, beta):
"""Compute the antiderivative of the Amos-type bound G on the modified
Bessel function ratio.
Note: Handles scalar kappa, alpha, and beta only.
See "S <-" in movMF.R and utility function implementation notes from
https://cran.r-project.org/web/packages/movMF/index.html
"""
kappa = 1. * np.abs(kappa)
alpha = 1. * alpha
beta = 1. * np.abs(beta)
a_plus_b = alpha + beta
u = np.sqrt(kappa ** 2 + beta ** 2)
if alpha == 0:
alpha_scale = 0
else:
alpha_scale = alpha * np.log((alpha + u) / a_plus_b)
return u - beta - alpha_scale
def _vmf_log_asymptotic(X, kappa, mu):
    """Compute log(f(x|theta)) via the Amos-bound approximation

        log(f(x|theta)) = kappa * <x, mu> - log(H_{d/2-1})(kappa)

    where theta = kappa * mu and ||theta|| = kappa. More numerically
    stable than _vmf_log for large kappa and n_features.

    See the utility-function notes in movMF.R from
    https://cran.r-project.org/web/packages/movMF/index.html
    """
    dim = X.shape[1]
    log_norm = _log_H_asymptotic(dim / 2. - 1., kappa)
    return kappa * X.dot(mu).T - log_norm
def _log_likelihood(X, centers, weights, concentrations):
    """Posterior cluster responsibilities for X under the fitted vMF mixture.

    Returns an array of shape (n_clusters, n_examples); each column sums
    to one. A single example may be passed as a 1-D vector.
    """
    # Promote a lone example to a 1-row matrix.
    if len(np.shape(X)) != 2:
        X = X.reshape((1, len(X)))
    n_examples = np.shape(X)[0]
    n_clusters = centers.shape[0]
    # The closed-form density is numerically stable only in low dimension
    # (empirically up to ~50); switch to the asymptotic form beyond that.
    density = _vmf_log if np.shape(X)[1] <= 50 else _vmf_log_asymptotic
    log_f = np.zeros((n_clusters, n_examples))
    for k in range(n_clusters):
        log_f[k, :] = density(X, concentrations[k], centers[k, :])
    scored = log_f + np.log(weights)[:, np.newaxis]
    posterior = np.zeros((n_clusters, n_examples))
    # Normalize each column in log space for numerical stability.
    for col in range(n_examples):
        posterior[:, col] = np.exp(scored[:, col] - logsumexp(scored[:, col]))
    return posterior
def _init_unit_centers(X, n_clusters, random_state, init):
"""Initializes unit norm centers.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
n_clusters : int, optional, default: 8
The number of clusters to form as well as the number of
centroids to generate.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
init: (string) one of
k-means++ : uses sklearn k-means++ initialization algorithm
spherical-k-means : use centroids from one pass of spherical k-means
random : random unit norm vectors
random-orthonormal : random orthonormal vectors
If an ndarray is passed, it should be of shape (n_clusters, n_features)
and gives the initial centers.
"""
n_examples, n_features = np.shape(X)
if isinstance(init, np.ndarray):
n_init_clusters, n_init_features = init.shape
assert n_init_clusters == n_clusters
assert n_init_features == n_features
# ensure unit normed centers
centers = init
for cc in range(n_clusters):
centers[cc, :] = centers[cc, :] / np.linalg.norm(centers[cc, :])
return centers
elif init == "spherical-k-means":
labels, inertia, centers, iters = spherical_kmeans._spherical_kmeans_single_lloyd(
X, n_clusters, x_squared_norms=np.ones((n_examples,)), init="k-means++"
)
return centers
elif init == "random":
centers = np.random.randn(n_clusters, n_features)
for cc in range(n_clusters):
centers[cc, :] = centers[cc, :] / np.linalg.norm(centers[cc, :])
return centers
elif init == "k-means++":
centers = _init_centroids(
X,
n_clusters,
"k-means++",
random_state=random_state,
x_squared_norms=np.ones((n_examples,)),
)
for cc in range(n_clusters):
centers[cc, :] = centers[cc, :] / np.linalg.norm(centers[cc, :])
return centers
elif init == "random-orthonormal":
centers = np.random.randn(n_clusters, n_features)
q, r = np.linalg.qr(centers.T, mode="reduced")
return q.T
elif init == "random-class":
centers = np.zeros((n_clusters, n_features))
for cc in range(n_clusters):
while np.linalg.norm(centers[cc, :]) == 0:
labels = np.random.randint(0, n_clusters, n_examples)
centers[cc, :] = X[labels == cc, :].sum(axis=0)
for cc in range(n_clusters):
centers[cc, :] = centers[cc, :] / np.linalg.norm(centers[cc, :])
return centers
def _expectation(X, centers, weights, concentrations, posterior_type="soft"):
    """E-step: log-likelihood of each example under each cluster.

    Parameters
    ----------
    centers (mu) : array, [n_centers x n_features]
    weights (alpha) : array, [n_centers, ]
    concentrations (kappa) : array, [n_centers, ]
    posterior_type : 'soft' (responsibilities) or 'hard' (one-hot argmax)

    Returns
    -------
    posterior : array, [n_centers, n_examples]
    """
    n_samples = np.shape(X)[0]
    n_centers = centers.shape[0]

    # Closed-form density is only numerically stable in low dimension
    # (empirically up to ~50); beyond that use the asymptotic form.
    density = _vmf_log if np.shape(X)[1] <= 50 else _vmf_log_asymptotic

    log_f = np.zeros((n_centers, n_samples))
    for k in range(n_centers):
        log_f[k, :] = density(X, concentrations[k], centers[k, :])

    # Mixture weights enter additively in log space.
    weighted = log_f + np.log(weights)[:, np.newaxis]

    posterior = np.zeros((n_centers, n_samples))
    if posterior_type == "soft":
        # Column-wise softmax computed stably via logsumexp.
        for col in range(n_samples):
            posterior[:, col] = np.exp(
                weighted[:, col] - logsumexp(weighted[:, col])
            )
    elif posterior_type == "hard":
        # One-hot assignment to the most likely cluster.
        for col in range(n_samples):
            posterior[np.argmax(weighted[:, col]), col] = 1.0

    return posterior
def _maximization(X, posterior, force_weights=None):
    """M-step: estimate new centers, weights, and concentrations.

    Parameters
    ----------
    X : array-like or sparse matrix, shape (n_examples, n_features)
        Unit-norm examples.
    posterior : array, [n_centers, n_examples]
        The posterior matrix from the expectation step.
    force_weights : None or array, [n_centers, ]
        If None is passed, will estimate weights.
        If an array is passed, will use instead of estimating.

    Returns
    -------
    centers (mu) : array, [n_centers x n_features]
    weights (alpha) : array, [n_centers, ] (alpha)
    concentrations (kappa) : array, [n_centers, ]
    """
    n_examples, n_features = X.shape
    n_clusters, n_examples = posterior.shape
    concentrations = np.zeros((n_clusters,))
    centers = np.zeros((n_clusters, n_features))
    if force_weights is None:
        weights = np.zeros((n_clusters,))

    for cc in range(n_clusters):
        # update weights (alpha): mean responsibility of cluster cc
        if force_weights is None:
            weights[cc] = np.mean(posterior[cc, :])
        else:
            weights = force_weights

        # update centers (mu): responsibility-weighted sum of examples
        X_scaled = X.copy()
        if sp.issparse(X):
            # Scale every stored value of row ee by posterior[cc, ee] by
            # repeating each row's weight across its nonzeros.
            # NOTE(review): relies on a CSR-style indptr layout -- confirm
            # callers never pass CSC here.
            X_scaled.data *= posterior[cc, :].repeat(np.diff(X_scaled.indptr))
        else:
            for ee in range(n_examples):
                X_scaled[ee, :] *= posterior[cc, ee]

        centers[cc, :] = X_scaled.sum(axis=0)

        # normalize centers; skip near-zero sums to avoid dividing by ~0
        center_norm = np.linalg.norm(centers[cc, :])
        if center_norm > 1e-8:
            centers[cc, :] = centers[cc, :] / center_norm

        # update concentration (kappa) via the Banerjee et al. (2005)
        # approximation [TODO: add other kappa approximations]
        rbar = center_norm / (n_examples * weights[cc])
        concentrations[cc] = rbar * n_features - np.power(rbar, 3.)
        if np.abs(rbar - 1.0) < 1e-10:
            # rbar -> 1 makes the denominator vanish; cap kappa instead.
            concentrations[cc] = MAX_CONTENTRATION
        else:
            concentrations[cc] /= 1. - np.power(rbar, 2.)

        # let python know we can free this (good for large dense X)
        del X_scaled

    return centers, weights, concentrations
def _movMF(
    X,
    n_clusters,
    posterior_type="soft",
    force_weights=None,
    max_iter=300,
    verbose=False,
    init="random-class",
    random_state=None,
    tol=1e-6,
):
    """Single EM run of mixture of von Mises-Fisher clustering.

    Implements the algorithms (i) and (ii) from
    "Clustering on the Unit Hypersphere using von Mises-Fisher Distributions"
    by Banerjee, Dhillon, Ghosh, and Sra.

    TODO: Currently only supports Banerjee et al 2005 approximation of kappa,
    however, there are numerous other approximations see _update_params.

    Attribution
    ----------
    Approximation of log-vmf distribution function from movMF R-package.
    movMF: An R Package for Fitting Mixtures of von Mises-Fisher Distributions
    by Kurt Hornik, Bettina Grun, 2014
    https://cran.r-project.org/web/packages/movMF/index.html

    Parameters
    ----------
    X : array-like or sparse matrix, shape (n_examples, n_features)
        Unit-norm examples.
    n_clusters : int
        Number of mixture components.
    posterior_type : 'soft' or 'hard'
        Type of posterior computed in the expectation step.
    force_weights : None or array [n_clusters, ]
        If None, the algorithm will estimate the weights; otherwise the
        given weights are used unchanged.
    max_iter : int, default: 300
        Maximum number of EM iterations.
    verbose : int, default 0
        Verbosity mode.
    init : string or ndarray
        One of random-class [default], k-means++, spherical-k-means,
        random, random-orthonormal, or an (n_clusters, n_features) array
        of initial centers. See _init_unit_centers.
    random_state : integer or numpy.RandomState, optional
        Seed/generator for center initialization.
    tol : float, default: 1e-6
        Squared-norm center-shift threshold to declare convergence.

    Returns
    -------
    (centers, weights, concentrations, posterior, labels, inertia)
    """
    random_state = check_random_state(random_state)
    n_examples, n_features = np.shape(X)

    # init centers (mus)
    centers = _init_unit_centers(X, n_clusters, random_state, init)

    # init weights (alphas): uniform unless forced
    if force_weights is None:
        weights = np.ones((n_clusters,))
        weights = weights / np.sum(weights)
    else:
        weights = force_weights

    # init concentrations (kappas)
    concentrations = np.ones((n_clusters,))

    if verbose:
        print("Initialization complete")

    # NOTE(review): `iter` shadows the builtin; assumes max_iter >= 1 so
    # `posterior` is always bound after the loop -- confirm callers.
    for iter in range(max_iter):
        centers_prev = centers.copy()

        # expectation step
        posterior = _expectation(
            X, centers, weights, concentrations, posterior_type=posterior_type
        )

        # maximization step
        centers, weights, concentrations = _maximization(
            X, posterior, force_weights=force_weights
        )

        # check convergence: squared norm of the center shift
        tolcheck = squared_norm(centers_prev - centers)
        if tolcheck <= tol:
            if verbose:
                print(
                    "Converged at iteration %d: "
                    "center shift %e within tolerance %e" % (iter, tolcheck, tol)
                )
            break

    # labels come for free via posterior (argmax responsibility per example)
    labels = np.zeros((n_examples,))
    for ee in range(n_examples):
        labels[ee] = np.argmax(posterior[:, ee])

    inertia = _inertia_from_labels(X, centers, labels)

    return centers, weights, concentrations, posterior, labels, inertia
def movMF(
    X,
    n_clusters,
    posterior_type="soft",
    force_weights=None,
    n_init=10,
    n_jobs=1,
    max_iter=300,
    verbose=False,
    init="random-class",
    random_state=None,
    tol=1e-6,
    copy_x=True,
):
    """Run _movMF n_init times (optionally in parallel) and keep the best.

    Parameters mirror _movMF, plus:

    n_init : int, default 10
        Number of random restarts; the run with the lowest inertia wins.
    n_jobs : int, default 1
        Parallelism for the restarts (joblib semantics; -1 = all CPUs).
    copy_x : boolean, default True
        Whether as_float_array may avoid copying X.

    Returns
    -------
    (best_centers, best_labels, best_inertia, best_weights,
     best_concentrations, best_posterior)

    Raises
    ------
    ValueError
        If n_init or max_iter is not positive.
    """
    if n_init <= 0:
        raise ValueError(
            "Invalid number of initializations."
            " n_init=%d must be bigger than zero." % n_init
        )
    random_state = check_random_state(random_state)

    if max_iter <= 0:
        raise ValueError(
            "Number of iterations should be a positive number,"
            " got %d instead" % max_iter
        )

    X = as_float_array(X, copy=copy_x)
    tol = _tolerance(X, tol)

    if hasattr(init, "__array__"):
        init = check_array(init, dtype=X.dtype.type, copy=True)
        _validate_center_shape(X, n_clusters, init)

        if n_init != 1:
            warnings.warn(
                "Explicit initial center position passed: "
                "performing only one init in k-means instead of n_init=%d" % n_init,
                RuntimeWarning,
                stacklevel=2,
            )
            n_init = 1

    # defaults
    best_centers = None
    best_labels = None
    best_weights = None
    best_concentrations = None
    best_posterior = None
    best_inertia = None

    if n_jobs == 1:
        # For a single thread, less memory is needed if we just store one set
        # of the best results (as opposed to one set per run per thread).
        for it in range(n_init):
            # cluster on the sphere
            (centers, weights, concentrations, posterior, labels, inertia) = _movMF(
                X,
                n_clusters,
                posterior_type=posterior_type,
                force_weights=force_weights,
                max_iter=max_iter,
                verbose=verbose,
                init=init,
                random_state=random_state,
                tol=tol,
            )

            # determine if these results are the best so far
            if best_inertia is None or inertia < best_inertia:
                best_centers = centers.copy()
                best_labels = labels.copy()
                best_weights = weights.copy()
                best_concentrations = concentrations.copy()
                best_posterior = posterior.copy()
                best_inertia = inertia
    else:
        # parallelisation of movMF runs
        seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init)
        results = Parallel(n_jobs=n_jobs, verbose=0)(
            delayed(_movMF)(
                X,
                n_clusters,
                posterior_type=posterior_type,
                force_weights=force_weights,
                max_iter=max_iter,
                verbose=verbose,
                init=init,
                # BUG FIX: each run must use its own seed; previously the
                # shared random_state was passed and the generated seeds
                # were ignored, so all parallel restarts were identical.
                random_state=seed,
                tol=tol,
            )
            for seed in seeds
        )

        # Get results with the lowest inertia
        centers, weights, concentrations, posteriors, labels, inertia = zip(*results)
        best = np.argmin(inertia)
        best_labels = labels[best]
        best_inertia = inertia[best]
        best_centers = centers[best]
        best_concentrations = concentrations[best]
        best_posterior = posteriors[best]
        best_weights = weights[best]

    return (
        best_centers,
        best_labels,
        best_inertia,
        best_weights,
        best_concentrations,
        best_posterior,
    )
class VonMisesFisherMixture(BaseEstimator, ClusterMixin, TransformerMixin):
    """Estimator for Mixture of von Mises Fisher clustering on the unit sphere.

    Implements the algorithms (i) and (ii) from
    "Clustering on the Unit Hypersphere using von Mises-Fisher Distributions"
    by Banerjee, Dhillon, Ghosh, and Sra.

    TODO: Currently only supports Banerjee et al 2005 approximation of kappa,
    however, there are numerous other approximations see _update_params.

    Attribution
    ----------
    Approximation of log-vmf distribution function from movMF R-package.
    movMF: An R Package for Fitting Mixtures of von Mises-Fisher Distributions
    by Kurt Hornik, Bettina Grun, 2014

    Find more at:
      https://cran.r-project.org/web/packages/movMF/vignettes/movMF.pdf
      https://cran.r-project.org/web/packages/movMF/index.html

    Basic sklearn scaffolding from sklearn.cluster.KMeans.

    Parameters
    ----------
    n_clusters : int, optional, default: 5
        The number of clusters to form as well as the number of
        centroids to generate.

    posterior_type: 'soft' or 'hard'
        Type of posterior computed in expectation step.
        See note about attribute: self.posterior_

    force_weights : None or array [n_clusters, ]
        If None, the algorithm will estimate the weights.
        If an array of weights, algorithm will estimate concentrations and
        centers with given weights.

    max_iter : int, default: 300
        Maximum number of iterations of the EM algorithm for a single run.

    n_init : int, default: 10
        Number of times the algorithm will be run with different
        initializations. The final results will be the best output of
        n_init consecutive runs in terms of inertia.

    init: (string) one of
        random-class [default]: random class assignment & centroid computation
        k-means++ : uses sklearn k-means++ initialization algorithm
        spherical-k-means : use centroids from one pass of spherical k-means
        random : random unit norm vectors
        random-orthonormal : random orthonormal vectors
        If an ndarray is passed, it should be of shape (n_clusters, n_features)
        and gives the initial centers.

    tol : float, default: 1e-6
        Relative tolerance with regards to inertia to declare convergence

    n_jobs : int
        The number of jobs to use for the computation. This works by computing
        each of the n_init runs in parallel.
        If -1 all CPUs are used. If 1 is given, no parallel computing code is
        used at all, which is useful for debugging. For n_jobs below -1,
        (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
        are used.

    random_state : integer or numpy.RandomState, optional
        The generator used to initialize the centers. If an integer is
        given, it fixes the seed. Defaults to the global numpy random
        number generator.

    verbose : int, default 0
        Verbosity mode.

    copy_x : boolean, default True
        When pre-computing distances it is more numerically accurate to center
        the data first. If copy_x is True, then the original data is not
        modified. If False, the original data is modified, and put back before
        the function returns, but small numerical differences may be introduced
        by subtracting and then adding the data mean.

    normalize : boolean, default True
        Normalize the input to have unit norm.

    Attributes
    ----------
    cluster_centers_ : array, [n_clusters, n_features]
        Coordinates of cluster centers

    labels_ :
        Labels of each point

    inertia_ : float
        Sum of distances of samples to their closest cluster center.

    weights_ : array, [n_clusters,]
        Weights of each cluster in vMF distribution (alpha).

    concentrations_ : array [n_clusters,]
        Concentration parameter for each cluster (kappa).
        Larger values correspond to more concentrated clusters.

    posterior_ : array, [n_clusters, n_examples]
        Each column corresponds to the posterior distribution for an example.

        If posterior_type='hard' is used, there will only be one non-zero per
        column, its index corresponding to the example's cluster label.

        If posterior_type='soft' is used, this matrix will be dense and the
        column values correspond to soft clustering weights.
    """

    def __init__(
        self,
        n_clusters=5,
        posterior_type="soft",
        force_weights=None,
        n_init=10,
        n_jobs=1,
        max_iter=300,
        verbose=False,
        init="random-class",
        random_state=None,
        tol=1e-6,
        copy_x=True,
        normalize=True,
    ):
        # sklearn convention: store hyperparameters verbatim; validate in fit()
        self.n_clusters = n_clusters
        self.posterior_type = posterior_type
        self.force_weights = force_weights
        self.n_init = n_init
        self.n_jobs = n_jobs
        self.max_iter = max_iter
        self.verbose = verbose
        self.init = init
        self.random_state = random_state
        self.tol = tol
        self.copy_x = copy_x
        self.normalize = normalize

    def _check_force_weights(self):
        """Raise ValueError if force_weights is set but mis-sized."""
        if self.force_weights is None:
            return

        if len(self.force_weights) != self.n_clusters:
            raise ValueError(
                (
                    "len(force_weights)={} but must equal "
                    "n_clusters={}".format(len(self.force_weights), self.n_clusters)
                )
            )

    def _check_fit_data(self, X):
        """Verify that the number of samples given is larger than k
        and that every row of X has (approximately) unit l2-norm."""
        X = check_array(X, accept_sparse="csr", dtype=[np.float64, np.float32])
        n_samples, n_features = X.shape
        if X.shape[0] < self.n_clusters:
            raise ValueError(
                "n_samples=%d should be >= n_clusters=%d"
                % (X.shape[0], self.n_clusters)
            )

        for ee in range(n_samples):
            if sp.issparse(X):
                n = sp.linalg.norm(X[ee, :])
            else:
                n = np.linalg.norm(X[ee, :])

            if np.abs(n - 1.) > 1e-4:
                raise ValueError("Data l2-norm must be 1, found {}".format(n))

        return X

    def _check_test_data(self, X):
        """Validate prediction-time input: feature count matches the fitted
        centers and every row has unit l2-norm."""
        X = check_array(X, accept_sparse="csr", dtype=FLOAT_DTYPES, warn_on_dtype=True)
        n_samples, n_features = X.shape
        expected_n_features = self.cluster_centers_.shape[1]
        if not n_features == expected_n_features:
            raise ValueError(
                "Incorrect number of features. "
                "Got %d features, expected %d" % (n_features, expected_n_features)
            )

        for ee in range(n_samples):
            if sp.issparse(X):
                n = sp.linalg.norm(X[ee, :])
            else:
                n = np.linalg.norm(X[ee, :])

            if np.abs(n - 1.) > 1e-4:
                raise ValueError("Data l2-norm must be 1, found {}".format(n))

        return X

    def fit(self, X, y=None):
        """Compute mixture of von Mises Fisher clustering.

        Parameters
        ----------
        X : array-like or sparse matrix, shape=(n_samples, n_features)
        """
        if self.normalize:
            X = normalize(X)

        self._check_force_weights()
        random_state = check_random_state(self.random_state)
        X = self._check_fit_data(X)

        (
            self.cluster_centers_,
            self.labels_,
            self.inertia_,
            self.weights_,
            self.concentrations_,
            self.posterior_,
        ) = movMF(
            X,
            self.n_clusters,
            posterior_type=self.posterior_type,
            force_weights=self.force_weights,
            n_init=self.n_init,
            n_jobs=self.n_jobs,
            max_iter=self.max_iter,
            verbose=self.verbose,
            init=self.init,
            random_state=random_state,
            tol=self.tol,
            copy_x=self.copy_x,
        )

        return self

    def fit_predict(self, X, y=None):
        """Compute cluster centers and predict cluster index for each sample.

        Convenience method; equivalent to calling fit(X) followed by
        predict(X).
        """
        return self.fit(X).labels_

    def fit_transform(self, X, y=None):
        """Compute clustering and transform X to cluster-distance space.

        Equivalent to fit(X).transform(X), but more efficiently implemented.
        """
        # Currently, this just skips a copy of the data if it is not in
        # np.array or CSR format already.
        # XXX This skips _check_test_data, which may change the dtype;
        # we should refactor the input validation.
        return self.fit(X)._transform(X)

    def transform(self, X, y=None):
        """Transform X to a cluster-distance space.

        In the new space, each dimension is the cosine distance to the cluster
        centers. Note that even if X is sparse, the array returned by
        `transform` will typically be dense.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            New data to transform.

        Returns
        -------
        X_new : array, shape [n_samples, k]
            X transformed in the new space.
        """
        if self.normalize:
            X = normalize(X)

        check_is_fitted(self, "cluster_centers_")
        X = self._check_test_data(X)
        return self._transform(X)

    def _transform(self, X):
        """guts of transform method; no input validation"""
        return cosine_distances(X, self.cluster_centers_)

    def predict(self, X):
        """Predict the closest cluster each sample in X belongs to.

        In the vector quantization literature, `cluster_centers_` is called
        the code book and each value returned by `predict` is the index of
        the closest code in the code book.

        Note: Does not check that each point is on the sphere.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            New data to predict.

        Returns
        -------
        labels : array, shape [n_samples,]
            Index of the cluster each sample belongs to.
        """
        if self.normalize:
            X = normalize(X)

        check_is_fitted(self, "cluster_centers_")

        X = self._check_test_data(X)
        return _labels_inertia(X, self.cluster_centers_)[0]

    def score(self, X, y=None):
        """Inertia score (sum of all distances to closest cluster).

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            New data.

        Returns
        -------
        score : float
            Larger score is better.
        """
        if self.normalize:
            X = normalize(X)

        check_is_fitted(self, "cluster_centers_")
        X = self._check_test_data(X)
        # Negated so that "larger is better", per sklearn's score convention.
        return -_labels_inertia(X, self.cluster_centers_)[1]

    def log_likelihood(self, X):
        """Posterior responsibilities of X under the fitted mixture
        (shape [n_clusters, n_examples]); see _log_likelihood."""
        check_is_fitted(self, "cluster_centers_")

        return _log_likelihood(
            X, self.cluster_centers_, self.weights_, self.concentrations_
        )
|
jasonlaska/spherecluster | spherecluster/von_mises_fisher_mixture.py | _labels_inertia | python | def _labels_inertia(X, centers):
n_examples, n_features = X.shape
n_clusters, n_features = centers.shape
labels = np.zeros((n_examples,))
inertia = np.zeros((n_examples,))
for ee in range(n_examples):
dists = np.zeros((n_clusters,))
for cc in range(n_clusters):
dists[cc] = 1 - X[ee, :].dot(centers[cc, :].T)
labels[ee] = np.argmin(dists)
inertia[ee] = dists[int(labels[ee])]
return labels, np.sum(inertia) | Compute labels and inertia with cosine distance. | train | https://github.com/jasonlaska/spherecluster/blob/701b0b1909088a56e353b363b2672580d4fe9d93/spherecluster/von_mises_fisher_mixture.py#L36-L53 | null | import warnings
import numpy as np
import scipy.sparse as sp
from scipy.special import iv # modified Bessel function of first kind, I_v
from numpy import i0 # modified Bessel function of first kind order 0, I_0
from scipy.special import logsumexp
from sklearn.base import BaseEstimator, ClusterMixin, TransformerMixin
from sklearn.cluster.k_means_ import _init_centroids, _tolerance, _validate_center_shape
from sklearn.utils.validation import FLOAT_DTYPES
from sklearn.utils.validation import check_is_fitted
from sklearn.utils import check_array, check_random_state, as_float_array
from sklearn.preprocessing import normalize
from sklearn.utils.extmath import squared_norm
from sklearn.metrics.pairwise import cosine_distances
from sklearn.externals.joblib import Parallel, delayed
from . import spherical_kmeans
MAX_CONTENTRATION = 1e10
def _inertia_from_labels(X, centers, labels):
"""Compute inertia with cosine distance using known labels.
"""
n_examples, n_features = X.shape
inertia = np.zeros((n_examples,))
for ee in range(n_examples):
inertia[ee] = 1 - X[ee, :].dot(centers[int(labels[ee]), :].T)
return np.sum(inertia)
def _vmf_log(X, kappa, mu):
    """Compute log(vMF(X; kappa, mu)) using numpy/scipy Bessel routines.

    Only reliable for small kappa and low dimension; large values should go
    through _vmf_log_asymptotic instead.

    Parameters
    ----------
    X : array, shape (n_examples, n_features) -- unit-norm examples
    kappa : float -- concentration parameter
    mu : array, shape (n_features,) -- unit-norm mean direction
    """
    n_examples, n_features = X.shape
    # Work in log space: log(c * exp(k * x.mu)) == log(c) + k * x.mu.
    # The original exponentiated first, which overflows for moderately
    # large kappa even when the log-density itself is representable.
    return np.log(_vmf_normalize(kappa, n_features)) + kappa * X.dot(mu).T
def _vmf_normalize(kappa, dim):
"""Compute normalization constant using built-in numpy/scipy Bessel
approximations.
Works well on small kappa and mu.
"""
num = np.power(kappa, dim / 2. - 1.)
if dim / 2. - 1. < 1e-15:
denom = np.power(2. * np.pi, dim / 2.) * i0(kappa)
else:
denom = np.power(2. * np.pi, dim / 2.) * iv(dim / 2. - 1., kappa)
if np.isinf(num):
raise ValueError("VMF scaling numerator was inf.")
if np.isinf(denom):
raise ValueError("VMF scaling denominator was inf.")
if np.abs(denom) < 1e-15:
raise ValueError("VMF scaling denominator was 0.")
return num / denom
def _log_H_asymptotic(nu, kappa):
    """Amos-type upper-bound asymptotic approximation of log(H_nu)(kappa),
    where log(H_nu)(kappa) = integral_0^kappa R_nu(t) dt.

    See "lH_asymptotic <-" in movMF.R and the utility-function notes from
    https://cran.r-project.org/web/packages/movMF/index.html
    """
    beta = np.sqrt((nu + 0.5) ** 2)
    # Below this cutoff a tighter low-kappa bound is blended in.
    cutoff = np.sqrt((3. * nu + 11. / 2.) * (nu + 3. / 2.))
    kappa_low = np.min([kappa, cutoff])
    correction = _S(kappa_low, nu, nu + 2.) - _S(kappa_low, nu + 0.5, beta)
    return _S(kappa, nu + 0.5, beta) + correction
def _S(kappa, alpha, beta):
"""Compute the antiderivative of the Amos-type bound G on the modified
Bessel function ratio.
Note: Handles scalar kappa, alpha, and beta only.
See "S <-" in movMF.R and utility function implementation notes from
https://cran.r-project.org/web/packages/movMF/index.html
"""
kappa = 1. * np.abs(kappa)
alpha = 1. * alpha
beta = 1. * np.abs(beta)
a_plus_b = alpha + beta
u = np.sqrt(kappa ** 2 + beta ** 2)
if alpha == 0:
alpha_scale = 0
else:
alpha_scale = alpha * np.log((alpha + u) / a_plus_b)
return u - beta - alpha_scale
def _vmf_log_asymptotic(X, kappa, mu):
    """Compute log(f(x|theta)) via the Amos-bound approximation

        log(f(x|theta)) = kappa * <x, mu> - log(H_{d/2-1})(kappa)

    where theta = kappa * mu and ||theta|| = kappa. More numerically
    stable than _vmf_log for large kappa and n_features.

    See the utility-function notes in movMF.R from
    https://cran.r-project.org/web/packages/movMF/index.html
    """
    dim = X.shape[1]
    log_norm = _log_H_asymptotic(dim / 2. - 1., kappa)
    return kappa * X.dot(mu).T - log_norm
def _log_likelihood(X, centers, weights, concentrations):
    """Posterior cluster responsibilities for X under the fitted vMF mixture.

    Returns an array of shape (n_clusters, n_examples); each column sums
    to one. A single example may be passed as a 1-D vector.
    """
    # Promote a lone example to a 1-row matrix.
    if len(np.shape(X)) != 2:
        X = X.reshape((1, len(X)))
    n_examples = np.shape(X)[0]
    n_clusters = centers.shape[0]
    # The closed-form density is numerically stable only in low dimension
    # (empirically up to ~50); switch to the asymptotic form beyond that.
    density = _vmf_log if np.shape(X)[1] <= 50 else _vmf_log_asymptotic
    log_f = np.zeros((n_clusters, n_examples))
    for k in range(n_clusters):
        log_f[k, :] = density(X, concentrations[k], centers[k, :])
    scored = log_f + np.log(weights)[:, np.newaxis]
    posterior = np.zeros((n_clusters, n_examples))
    # Normalize each column in log space for numerical stability.
    for col in range(n_examples):
        posterior[:, col] = np.exp(scored[:, col] - logsumexp(scored[:, col]))
    return posterior
def _init_unit_centers(X, n_clusters, random_state, init):
"""Initializes unit norm centers.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
n_clusters : int, optional, default: 8
The number of clusters to form as well as the number of
centroids to generate.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
init: (string) one of
k-means++ : uses sklearn k-means++ initialization algorithm
spherical-k-means : use centroids from one pass of spherical k-means
random : random unit norm vectors
random-orthonormal : random orthonormal vectors
If an ndarray is passed, it should be of shape (n_clusters, n_features)
and gives the initial centers.
"""
n_examples, n_features = np.shape(X)
if isinstance(init, np.ndarray):
n_init_clusters, n_init_features = init.shape
assert n_init_clusters == n_clusters
assert n_init_features == n_features
# ensure unit normed centers
centers = init
for cc in range(n_clusters):
centers[cc, :] = centers[cc, :] / np.linalg.norm(centers[cc, :])
return centers
elif init == "spherical-k-means":
labels, inertia, centers, iters = spherical_kmeans._spherical_kmeans_single_lloyd(
X, n_clusters, x_squared_norms=np.ones((n_examples,)), init="k-means++"
)
return centers
elif init == "random":
centers = np.random.randn(n_clusters, n_features)
for cc in range(n_clusters):
centers[cc, :] = centers[cc, :] / np.linalg.norm(centers[cc, :])
return centers
elif init == "k-means++":
centers = _init_centroids(
X,
n_clusters,
"k-means++",
random_state=random_state,
x_squared_norms=np.ones((n_examples,)),
)
for cc in range(n_clusters):
centers[cc, :] = centers[cc, :] / np.linalg.norm(centers[cc, :])
return centers
elif init == "random-orthonormal":
centers = np.random.randn(n_clusters, n_features)
q, r = np.linalg.qr(centers.T, mode="reduced")
return q.T
elif init == "random-class":
centers = np.zeros((n_clusters, n_features))
for cc in range(n_clusters):
while np.linalg.norm(centers[cc, :]) == 0:
labels = np.random.randint(0, n_clusters, n_examples)
centers[cc, :] = X[labels == cc, :].sum(axis=0)
for cc in range(n_clusters):
centers[cc, :] = centers[cc, :] / np.linalg.norm(centers[cc, :])
return centers
def _expectation(X, centers, weights, concentrations, posterior_type="soft"):
    """Compute the posterior of each datapoint belonging to each cluster.

    Parameters
    ----------
    centers (mu) : array, [n_centers x n_features]
    weights (alpha) : array, [n_centers, ]
    concentrations (kappa) : array, [n_centers, ]

    Returns
    -------
    posterior : array, [n_centers, n_examples]
    """
    n_examples, n_features = np.shape(X)
    n_clusters = centers.shape[0]

    # The exact vMF log-density loses precision in high dimension.
    vmf_f = _vmf_log if n_features <= 50 else _vmf_log_asymptotic

    f_log = np.zeros((n_clusters, n_examples))
    for cluster in range(n_clusters):
        f_log[cluster, :] = vmf_f(X, concentrations[cluster], centers[cluster, :])

    posterior = np.zeros((n_clusters, n_examples))
    weighted = np.tile(np.log(weights).T, (n_examples, 1)).T + f_log

    if posterior_type == "soft":
        # Normalize each column in log space for numerical stability.
        for col in range(n_examples):
            posterior[:, col] = np.exp(weighted[:, col] - logsumexp(weighted[:, col]))

    elif posterior_type == "hard":
        # One-hot assignment to the most probable cluster.
        for col in range(n_examples):
            posterior[np.argmax(weighted[:, col]), col] = 1.0

    return posterior
def _maximization(X, posterior, force_weights=None):
"""Estimate new centers, weights, and concentrations from
Parameters
----------
posterior : array, [n_centers, n_examples]
The posterior matrix from the expectation step.
force_weights : None or array, [n_centers, ]
If None is passed, will estimate weights.
If an array is passed, will use instead of estimating.
Returns
----------
centers (mu) : array, [n_centers x n_features]
weights (alpha) : array, [n_centers, ] (alpha)
concentrations (kappa) : array, [n_centers, ]
"""
n_examples, n_features = X.shape
n_clusters, n_examples = posterior.shape
concentrations = np.zeros((n_clusters,))
centers = np.zeros((n_clusters, n_features))
if force_weights is None:
weights = np.zeros((n_clusters,))
for cc in range(n_clusters):
# update weights (alpha)
if force_weights is None:
weights[cc] = np.mean(posterior[cc, :])
else:
weights = force_weights
# update centers (mu)
X_scaled = X.copy()
if sp.issparse(X):
X_scaled.data *= posterior[cc, :].repeat(np.diff(X_scaled.indptr))
else:
for ee in range(n_examples):
X_scaled[ee, :] *= posterior[cc, ee]
centers[cc, :] = X_scaled.sum(axis=0)
# normalize centers
center_norm = np.linalg.norm(centers[cc, :])
if center_norm > 1e-8:
centers[cc, :] = centers[cc, :] / center_norm
# update concentration (kappa) [TODO: add other kappa approximations]
rbar = center_norm / (n_examples * weights[cc])
concentrations[cc] = rbar * n_features - np.power(rbar, 3.)
if np.abs(rbar - 1.0) < 1e-10:
concentrations[cc] = MAX_CONTENTRATION
else:
concentrations[cc] /= 1. - np.power(rbar, 2.)
# let python know we can free this (good for large dense X)
del X_scaled
return centers, weights, concentrations
def _movMF(
    X,
    n_clusters,
    posterior_type="soft",
    force_weights=None,
    max_iter=300,
    verbose=False,
    init="random-class",
    random_state=None,
    tol=1e-6,
):
    """Single EM run of mixture of von Mises Fisher clustering.

    Implements the algorithms (i) and (ii) from

      "Clustering on the Unit Hypersphere using von Mises-Fisher Distributions"
      by Banerjee, Dhillon, Ghosh, and Sra.

    TODO: Currently only supports Banerjee et al 2005 approximation of kappa,
          however, there are numerous other approximations see _update_params.

    Attribution
    ----------
    Approximation of log-vmf distribution function from movMF R-package.

    movMF: An R Package for Fitting Mixtures of von Mises-Fisher Distributions
    by Kurt Hornik, Bettina Grun, 2014

    Find more at:
      https://cran.r-project.org/web/packages/movMF/vignettes/movMF.pdf
      https://cran.r-project.org/web/packages/movMF/index.html

    Parameters
    ----------
    n_clusters : int, optional, default: 8
        The number of clusters to form as well as the number of
        centroids to generate.

    posterior_type: 'soft' or 'hard'
        Type of posterior computed in expectation step.

    force_weights : None or array [n_clusters, ]
        If None, the algorithm will estimate the weights.
        If an array of weights, algorithm will estimate concentrations and
        centers with given weights.

    max_iter : int, default: 300
        Maximum number of EM iterations for this single run.

    verbose : bool, default False
        Verbosity mode.

    init: (string) one of
        random-class [default]: random class assignment & centroid computation
        k-means++ : uses sklearn k-means++ initialization algorithm
        spherical-k-means : use centroids from one pass of spherical k-means
        random : random unit norm vectors
        random-orthonormal : random orthonormal vectors
        If an ndarray is passed, it should be of shape (n_clusters, n_features)
        and gives the initial centers.

    random_state : integer or numpy.RandomState, optional
        The generator used to initialize the centers.

    tol : float, default: 1e-6
        Convergence tolerance on the squared movement of the centers.

    Returns
    -------
    (centers, weights, concentrations, posterior, labels, inertia)
        One EM fit; ``inertia`` is the sum of cosine distances of examples
        to their assigned centers.
    """
    random_state = check_random_state(random_state)
    n_examples, n_features = np.shape(X)

    # init centers (mus)
    centers = _init_unit_centers(X, n_clusters, random_state, init)

    # init weights (alphas): uniform prior over clusters
    if force_weights is None:
        weights = np.ones((n_clusters,))
        weights = weights / np.sum(weights)
    else:
        weights = force_weights

    # init concentrations (kappas)
    concentrations = np.ones((n_clusters,))

    if verbose:
        print("Initialization complete")

    for iter in range(max_iter):
        centers_prev = centers.copy()

        # expectation step: cluster responsibilities given current parameters
        posterior = _expectation(
            X, centers, weights, concentrations, posterior_type=posterior_type
        )

        # maximization step: re-estimate mus, alphas, kappas
        centers, weights, concentrations = _maximization(
            X, posterior, force_weights=force_weights
        )

        # check convergence: squared movement of the centers
        tolcheck = squared_norm(centers_prev - centers)
        if tolcheck <= tol:
            if verbose:
                print(
                    "Converged at iteration %d: "
                    "center shift %e within tolerance %e" % (iter, tolcheck, tol)
                )
            break

    # labels come for free via posterior
    labels = np.zeros((n_examples,))
    for ee in range(n_examples):
        labels[ee] = np.argmax(posterior[:, ee])

    inertia = _inertia_from_labels(X, centers, labels)

    return centers, weights, concentrations, posterior, labels, inertia
def movMF(
    X,
    n_clusters,
    posterior_type="soft",
    force_weights=None,
    n_init=10,
    n_jobs=1,
    max_iter=300,
    verbose=False,
    init="random-class",
    random_state=None,
    tol=1e-6,
    copy_x=True,
):
    """Wrapper for parallelization of _movMF and running n_init times.

    Runs ``_movMF`` ``n_init`` times (serially when ``n_jobs == 1``, else via
    joblib) and returns the solution with the lowest inertia.

    Returns
    -------
    (centers, labels, inertia, weights, concentrations, posterior)

    Raises
    ------
    ValueError
        If ``n_init`` or ``max_iter`` is not a positive integer.
    """
    if n_init <= 0:
        raise ValueError(
            "Invalid number of initializations."
            " n_init=%d must be bigger than zero." % n_init
        )
    random_state = check_random_state(random_state)

    if max_iter <= 0:
        raise ValueError(
            "Number of iterations should be a positive number,"
            " got %d instead" % max_iter
        )

    X = as_float_array(X, copy=copy_x)
    tol = _tolerance(X, tol)

    if hasattr(init, "__array__"):
        init = check_array(init, dtype=X.dtype.type, copy=True)
        _validate_center_shape(X, n_clusters, init)

        if n_init != 1:
            warnings.warn(
                "Explicit initial center position passed: "
                "performing only one init in k-means instead of n_init=%d" % n_init,
                RuntimeWarning,
                stacklevel=2,
            )
            n_init = 1

    # defaults
    best_centers = None
    best_labels = None
    best_weights = None
    best_concentrations = None
    best_posterior = None
    best_inertia = None

    if n_jobs == 1:
        # For a single thread, less memory is needed if we just store one set
        # of the best results (as opposed to one set per run per thread).
        for it in range(n_init):
            # cluster on the sphere
            (centers, weights, concentrations, posterior, labels, inertia) = _movMF(
                X,
                n_clusters,
                posterior_type=posterior_type,
                force_weights=force_weights,
                max_iter=max_iter,
                verbose=verbose,
                init=init,
                random_state=random_state,
                tol=tol,
            )

            # determine if these results are the best so far
            if best_inertia is None or inertia < best_inertia:
                best_centers = centers.copy()
                best_labels = labels.copy()
                best_weights = weights.copy()
                best_concentrations = concentrations.copy()
                best_posterior = posterior.copy()
                best_inertia = inertia
    else:
        # parallelisation of movMF runs
        # BUGFIX: each parallel run must receive its own seed; previously
        # the shared random_state was passed, so the generated `seeds` were
        # unused and every initialization was identical.
        seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init)
        results = Parallel(n_jobs=n_jobs, verbose=0)(
            delayed(_movMF)(
                X,
                n_clusters,
                posterior_type=posterior_type,
                force_weights=force_weights,
                max_iter=max_iter,
                verbose=verbose,
                init=init,
                random_state=seed,
                tol=tol,
            )
            for seed in seeds
        )

        # Get results with the lowest inertia
        centers, weights, concentrations, posteriors, labels, inertia = zip(*results)
        best = np.argmin(inertia)
        best_labels = labels[best]
        best_inertia = inertia[best]
        best_centers = centers[best]
        best_concentrations = concentrations[best]
        best_posterior = posteriors[best]
        best_weights = weights[best]

    return (
        best_centers,
        best_labels,
        best_inertia,
        best_weights,
        best_concentrations,
        best_posterior,
    )
class VonMisesFisherMixture(BaseEstimator, ClusterMixin, TransformerMixin):
    """Estimator for Mixture of von Mises Fisher clustering on the unit sphere.

    Implements the algorithms (i) and (ii) from

      "Clustering on the Unit Hypersphere using von Mises-Fisher Distributions"
      by Banerjee, Dhillon, Ghosh, and Sra.

    TODO: Currently only supports Banerjee et al 2005 approximation of kappa,
          however, there are numerous other approximations see _update_params.

    Attribution
    ----------
    Approximation of log-vmf distribution function from movMF R-package.

    movMF: An R Package for Fitting Mixtures of von Mises-Fisher Distributions
    by Kurt Hornik, Bettina Grun, 2014

    Find more at:
      https://cran.r-project.org/web/packages/movMF/vignettes/movMF.pdf
      https://cran.r-project.org/web/packages/movMF/index.html

    Basic sklearn scaffolding from sklearn.cluster.KMeans.

    Parameters
    ----------
    n_clusters : int, optional, default: 8
        The number of clusters to form as well as the number of
        centroids to generate.

    posterior_type: 'soft' or 'hard'
        Type of posterior computed in expectation step.
        See note about attribute: self.posterior_

    force_weights : None or array [n_clusters, ]
        If None, the algorithm will estimate the weights.
        If an array of weights, algorithm will estimate concentrations and
        centers with given weights.

    max_iter : int, default: 300
        Maximum number of iterations of the k-means algorithm for a
        single run.

    n_init : int, default: 10
        Number of time the k-means algorithm will be run with different
        centroid seeds. The final results will be the best output of
        n_init consecutive runs in terms of inertia.

    init: (string) one of
        random-class [default]: random class assignment & centroid computation
        k-means++ : uses sklearn k-means++ initialization algorithm
        spherical-k-means : use centroids from one pass of spherical k-means
        random : random unit norm vectors
        random-orthonormal : random orthonormal vectors
        If an ndarray is passed, it should be of shape (n_clusters, n_features)
        and gives the initial centers.

    tol : float, default: 1e-6
        Relative tolerance with regards to inertia to declare convergence

    n_jobs : int
        The number of jobs to use for the computation. This works by computing
        each of the n_init runs in parallel.
        If -1 all CPUs are used. If 1 is given, no parallel computing code is
        used at all, which is useful for debugging. For n_jobs below -1,
        (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
        are used.

    random_state : integer or numpy.RandomState, optional
        The generator used to initialize the centers. If an integer is
        given, it fixes the seed. Defaults to the global numpy random
        number generator.

    verbose : int, default 0
        Verbosity mode.

    copy_x : boolean, default True
        When pre-computing distances it is more numerically accurate to center
        the data first. If copy_x is True, then the original data is not
        modified. If False, the original data is modified, and put back before
        the function returns, but small numerical differences may be introduced
        by subtracting and then adding the data mean.

    normalize : boolean, default True
        Normalize the input to have unit norm.

    Attributes
    ----------
    cluster_centers_ : array, [n_clusters, n_features]
        Coordinates of cluster centers

    labels_ :
        Labels of each point

    inertia_ : float
        Sum of distances of samples to their closest cluster center.

    weights_ : array, [n_clusters,]
        Weights of each cluster in vMF distribution (alpha).

    concentrations_ : array [n_clusters,]
        Concentration parameter for each cluster (kappa).
        Larger values correspond to more concentrated clusters.

    posterior_ : array, [n_clusters, n_examples]
        Each column corresponds to the posterior distribution for an example.

        If posterior_type='hard' is used, there will only be one non-zero per
        column, its index corresponding to the example's cluster label.

        If posterior_type='soft' is used, this matrix will be dense and the
        column values correspond to soft clustering weights.
    """

    def __init__(
        self,
        n_clusters=5,
        posterior_type="soft",
        force_weights=None,
        n_init=10,
        n_jobs=1,
        max_iter=300,
        verbose=False,
        init="random-class",
        random_state=None,
        tol=1e-6,
        copy_x=True,
        normalize=True,
    ):
        # Per sklearn convention, __init__ only stores hyperparameters;
        # validation happens in fit().
        self.n_clusters = n_clusters
        self.posterior_type = posterior_type
        self.force_weights = force_weights
        self.n_init = n_init
        self.n_jobs = n_jobs
        self.max_iter = max_iter
        self.verbose = verbose
        self.init = init
        self.random_state = random_state
        self.tol = tol
        self.copy_x = copy_x
        self.normalize = normalize

    def _check_force_weights(self):
        """Validate that user-supplied weights (if any) match n_clusters."""
        if self.force_weights is None:
            return

        if len(self.force_weights) != self.n_clusters:
            raise ValueError(
                (
                    "len(force_weights)={} but must equal "
                    "n_clusters={}".format(len(self.force_weights), self.n_clusters)
                )
            )

    def _check_fit_data(self, X):
        """Verify that the number of samples given is larger than k"""
        X = check_array(X, accept_sparse="csr", dtype=[np.float64, np.float32])
        n_samples, n_features = X.shape
        if X.shape[0] < self.n_clusters:
            raise ValueError(
                "n_samples=%d should be >= n_clusters=%d"
                % (X.shape[0], self.n_clusters)
            )

        # vMF is defined on the unit hypersphere; every row must be unit norm.
        for ee in range(n_samples):
            if sp.issparse(X):
                n = sp.linalg.norm(X[ee, :])
            else:
                n = np.linalg.norm(X[ee, :])

            if np.abs(n - 1.) > 1e-4:
                raise ValueError("Data l2-norm must be 1, found {}".format(n))

        return X

    def _check_test_data(self, X):
        """Validate prediction-time input: dtype, feature count, unit norm."""
        X = check_array(X, accept_sparse="csr", dtype=FLOAT_DTYPES, warn_on_dtype=True)
        n_samples, n_features = X.shape
        expected_n_features = self.cluster_centers_.shape[1]
        if not n_features == expected_n_features:
            raise ValueError(
                "Incorrect number of features. "
                "Got %d features, expected %d" % (n_features, expected_n_features)
            )

        # Same unit-norm requirement as in _check_fit_data.
        for ee in range(n_samples):
            if sp.issparse(X):
                n = sp.linalg.norm(X[ee, :])
            else:
                n = np.linalg.norm(X[ee, :])

            if np.abs(n - 1.) > 1e-4:
                raise ValueError("Data l2-norm must be 1, found {}".format(n))

        return X

    def fit(self, X, y=None):
        """Compute mixture of von Mises Fisher clustering.

        Parameters
        ----------
        X : array-like or sparse matrix, shape=(n_samples, n_features)
        """
        if self.normalize:
            X = normalize(X)

        self._check_force_weights()
        random_state = check_random_state(self.random_state)
        X = self._check_fit_data(X)

        (
            self.cluster_centers_,
            self.labels_,
            self.inertia_,
            self.weights_,
            self.concentrations_,
            self.posterior_,
        ) = movMF(
            X,
            self.n_clusters,
            posterior_type=self.posterior_type,
            force_weights=self.force_weights,
            n_init=self.n_init,
            n_jobs=self.n_jobs,
            max_iter=self.max_iter,
            verbose=self.verbose,
            init=self.init,
            random_state=random_state,
            tol=self.tol,
            copy_x=self.copy_x,
        )

        return self

    def fit_predict(self, X, y=None):
        """Compute cluster centers and predict cluster index for each sample.

        Convenience method; equivalent to calling fit(X) followed by
        predict(X).
        """
        return self.fit(X).labels_

    def fit_transform(self, X, y=None):
        """Compute clustering and transform X to cluster-distance space.

        Equivalent to fit(X).transform(X), but more efficiently implemented.
        """
        # Currently, this just skips a copy of the data if it is not in
        # np.array or CSR format already.
        # XXX This skips _check_test_data, which may change the dtype;
        # we should refactor the input validation.
        return self.fit(X)._transform(X)

    def transform(self, X, y=None):
        """Transform X to a cluster-distance space.

        In the new space, each dimension is the cosine distance to the cluster
        centers. Note that even if X is sparse, the array returned by
        `transform` will typically be dense.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            New data to transform.

        Returns
        -------
        X_new : array, shape [n_samples, k]
            X transformed in the new space.
        """
        if self.normalize:
            X = normalize(X)

        check_is_fitted(self, "cluster_centers_")
        X = self._check_test_data(X)
        return self._transform(X)

    def _transform(self, X):
        """guts of transform method; no input validation"""
        return cosine_distances(X, self.cluster_centers_)

    def predict(self, X):
        """Predict the closest cluster each sample in X belongs to.

        In the vector quantization literature, `cluster_centers_` is called
        the code book and each value returned by `predict` is the index of
        the closest code in the code book.

        Note: Does not check that each point is on the sphere.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            New data to predict.

        Returns
        -------
        labels : array, shape [n_samples,]
            Index of the cluster each sample belongs to.
        """
        if self.normalize:
            X = normalize(X)

        check_is_fitted(self, "cluster_centers_")
        X = self._check_test_data(X)
        return _labels_inertia(X, self.cluster_centers_)[0]

    def score(self, X, y=None):
        """Inertia score (sum of all distances to closest cluster).

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            New data.

        Returns
        -------
        score : float
            Larger score is better.
        """
        if self.normalize:
            X = normalize(X)

        check_is_fitted(self, "cluster_centers_")
        X = self._check_test_data(X)
        # Negative inertia so that "larger is better", per sklearn convention.
        return -_labels_inertia(X, self.cluster_centers_)[1]

    def log_likelihood(self, X):
        # Posterior responsibilities of X under the fitted mixture parameters.
        check_is_fitted(self, "cluster_centers_")
        return _log_likelihood(
            X, self.cluster_centers_, self.weights_, self.concentrations_
        )
|
jasonlaska/spherecluster | spherecluster/von_mises_fisher_mixture.py | _vmf_log | python | def _vmf_log(X, kappa, mu):
n_examples, n_features = X.shape
return np.log(_vmf_normalize(kappa, n_features) * np.exp(kappa * X.dot(mu).T)) | Computs log(vMF(X, kappa, mu)) using built-in numpy/scipy Bessel
approximations.
Works well on small kappa and mu. | train | https://github.com/jasonlaska/spherecluster/blob/701b0b1909088a56e353b363b2672580d4fe9d93/spherecluster/von_mises_fisher_mixture.py#L56-L63 | [
"def _vmf_normalize(kappa, dim):\n \"\"\"Compute normalization constant using built-in numpy/scipy Bessel\n approximations.\n\n Works well on small kappa and mu.\n \"\"\"\n num = np.power(kappa, dim / 2. - 1.)\n\n if dim / 2. - 1. < 1e-15:\n denom = np.power(2. * np.pi, dim / 2.) * i0(kappa... | import warnings
import numpy as np
import scipy.sparse as sp
from scipy.special import iv # modified Bessel function of first kind, I_v
from numpy import i0 # modified Bessel function of first kind order 0, I_0
from scipy.special import logsumexp
from sklearn.base import BaseEstimator, ClusterMixin, TransformerMixin
from sklearn.cluster.k_means_ import _init_centroids, _tolerance, _validate_center_shape
from sklearn.utils.validation import FLOAT_DTYPES
from sklearn.utils.validation import check_is_fitted
from sklearn.utils import check_array, check_random_state, as_float_array
from sklearn.preprocessing import normalize
from sklearn.utils.extmath import squared_norm
from sklearn.metrics.pairwise import cosine_distances
from sklearn.externals.joblib import Parallel, delayed
from . import spherical_kmeans
MAX_CONTENTRATION = 1e10
def _inertia_from_labels(X, centers, labels):
"""Compute inertia with cosine distance using known labels.
"""
n_examples, n_features = X.shape
inertia = np.zeros((n_examples,))
for ee in range(n_examples):
inertia[ee] = 1 - X[ee, :].dot(centers[int(labels[ee]), :].T)
return np.sum(inertia)
def _labels_inertia(X, centers):
"""Compute labels and inertia with cosine distance.
"""
n_examples, n_features = X.shape
n_clusters, n_features = centers.shape
labels = np.zeros((n_examples,))
inertia = np.zeros((n_examples,))
for ee in range(n_examples):
dists = np.zeros((n_clusters,))
for cc in range(n_clusters):
dists[cc] = 1 - X[ee, :].dot(centers[cc, :].T)
labels[ee] = np.argmin(dists)
inertia[ee] = dists[int(labels[ee])]
return labels, np.sum(inertia)
def _vmf_normalize(kappa, dim):
"""Compute normalization constant using built-in numpy/scipy Bessel
approximations.
Works well on small kappa and mu.
"""
num = np.power(kappa, dim / 2. - 1.)
if dim / 2. - 1. < 1e-15:
denom = np.power(2. * np.pi, dim / 2.) * i0(kappa)
else:
denom = np.power(2. * np.pi, dim / 2.) * iv(dim / 2. - 1., kappa)
if np.isinf(num):
raise ValueError("VMF scaling numerator was inf.")
if np.isinf(denom):
raise ValueError("VMF scaling denominator was inf.")
if np.abs(denom) < 1e-15:
raise ValueError("VMF scaling denominator was 0.")
return num / denom
def _log_H_asymptotic(nu, kappa):
    r"""Compute the Amos-type upper bound asymptotic approximation on H where
    log(H_\nu)(\kappa) = \int_0^\kappa R_\nu(t) dt.

    See "lH_asymptotic <-" in movMF.R and utility function implementation notes
    from https://cran.r-project.org/web/packages/movMF/index.html
    """
    beta = np.sqrt((nu + 0.5) ** 2)
    # Clip kappa for the lower-order correction terms.
    kappa_l = np.min([kappa, np.sqrt((3. * nu + 11. / 2.) * (nu + 3. / 2.))])
    correction = _S(kappa_l, nu, nu + 2.) - _S(kappa_l, nu + 0.5, beta)
    return _S(kappa, nu + 0.5, beta) + correction
def _S(kappa, alpha, beta):
"""Compute the antiderivative of the Amos-type bound G on the modified
Bessel function ratio.
Note: Handles scalar kappa, alpha, and beta only.
See "S <-" in movMF.R and utility function implementation notes from
https://cran.r-project.org/web/packages/movMF/index.html
"""
kappa = 1. * np.abs(kappa)
alpha = 1. * alpha
beta = 1. * np.abs(beta)
a_plus_b = alpha + beta
u = np.sqrt(kappa ** 2 + beta ** 2)
if alpha == 0:
alpha_scale = 0
else:
alpha_scale = alpha * np.log((alpha + u) / a_plus_b)
return u - beta - alpha_scale
def _vmf_log_asymptotic(X, kappa, mu):
    r"""Compute log(f(x|theta)) via the Amos-bound approximation

        log(f(x|theta)) = theta' x - log(H_{d/2-1})(\|theta\|)

    where theta = kappa * mu and \|theta\| = kappa.

    Numerically stable for large kappa and n_features, unlike _vmf_log.

    See utility function implementation notes in movMF.R from
    https://cran.r-project.org/web/packages/movMF/index.html
    """
    _, n_features = X.shape
    log_norm = _log_H_asymptotic(n_features / 2. - 1., kappa)
    return kappa * X.dot(mu).T - log_norm
def _log_likelihood(X, centers, weights, concentrations):
    """Posterior responsibility of each cluster for each example.

    Returns an array of shape [n_centers, n_examples] whose columns sum to 1.
    """
    # Promote a single example to a 1 x n_features matrix.
    if len(np.shape(X)) != 2:
        X = X.reshape((1, len(X)))

    n_examples, n_features = np.shape(X)
    n_clusters = centers.shape[0]

    # The naive vMF density is numerically stable only in low dimension.
    vmf_f = _vmf_log if n_features <= 50 else _vmf_log_asymptotic

    f_log = np.zeros((n_clusters, n_examples))
    for cluster in range(n_clusters):
        f_log[cluster, :] = vmf_f(X, concentrations[cluster], centers[cluster, :])

    # Log prior + log likelihood, normalized per column via log-sum-exp.
    posterior = np.tile(np.log(weights).T, (n_examples, 1)).T + f_log
    for col in range(n_examples):
        posterior[:, col] = np.exp(posterior[:, col] - logsumexp(posterior[:, col]))

    return posterior
def _init_unit_centers(X, n_clusters, random_state, init):
"""Initializes unit norm centers.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
n_clusters : int, optional, default: 8
The number of clusters to form as well as the number of
centroids to generate.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
init: (string) one of
k-means++ : uses sklearn k-means++ initialization algorithm
spherical-k-means : use centroids from one pass of spherical k-means
random : random unit norm vectors
random-orthonormal : random orthonormal vectors
If an ndarray is passed, it should be of shape (n_clusters, n_features)
and gives the initial centers.
"""
n_examples, n_features = np.shape(X)
if isinstance(init, np.ndarray):
n_init_clusters, n_init_features = init.shape
assert n_init_clusters == n_clusters
assert n_init_features == n_features
# ensure unit normed centers
centers = init
for cc in range(n_clusters):
centers[cc, :] = centers[cc, :] / np.linalg.norm(centers[cc, :])
return centers
elif init == "spherical-k-means":
labels, inertia, centers, iters = spherical_kmeans._spherical_kmeans_single_lloyd(
X, n_clusters, x_squared_norms=np.ones((n_examples,)), init="k-means++"
)
return centers
elif init == "random":
centers = np.random.randn(n_clusters, n_features)
for cc in range(n_clusters):
centers[cc, :] = centers[cc, :] / np.linalg.norm(centers[cc, :])
return centers
elif init == "k-means++":
centers = _init_centroids(
X,
n_clusters,
"k-means++",
random_state=random_state,
x_squared_norms=np.ones((n_examples,)),
)
for cc in range(n_clusters):
centers[cc, :] = centers[cc, :] / np.linalg.norm(centers[cc, :])
return centers
elif init == "random-orthonormal":
centers = np.random.randn(n_clusters, n_features)
q, r = np.linalg.qr(centers.T, mode="reduced")
return q.T
elif init == "random-class":
centers = np.zeros((n_clusters, n_features))
for cc in range(n_clusters):
while np.linalg.norm(centers[cc, :]) == 0:
labels = np.random.randint(0, n_clusters, n_examples)
centers[cc, :] = X[labels == cc, :].sum(axis=0)
for cc in range(n_clusters):
centers[cc, :] = centers[cc, :] / np.linalg.norm(centers[cc, :])
return centers
def _expectation(X, centers, weights, concentrations, posterior_type="soft"):
    """Compute the posterior of each datapoint belonging to each cluster.

    Parameters
    ----------
    centers (mu) : array, [n_centers x n_features]
    weights (alpha) : array, [n_centers, ]
    concentrations (kappa) : array, [n_centers, ]

    Returns
    -------
    posterior : array, [n_centers, n_examples]
    """
    n_examples, n_features = np.shape(X)
    n_clusters = centers.shape[0]

    # The exact vMF log-density loses precision in high dimension.
    vmf_f = _vmf_log if n_features <= 50 else _vmf_log_asymptotic

    f_log = np.zeros((n_clusters, n_examples))
    for cluster in range(n_clusters):
        f_log[cluster, :] = vmf_f(X, concentrations[cluster], centers[cluster, :])

    posterior = np.zeros((n_clusters, n_examples))
    weighted = np.tile(np.log(weights).T, (n_examples, 1)).T + f_log

    if posterior_type == "soft":
        # Normalize each column in log space for numerical stability.
        for col in range(n_examples):
            posterior[:, col] = np.exp(weighted[:, col] - logsumexp(weighted[:, col]))

    elif posterior_type == "hard":
        # One-hot assignment to the most probable cluster.
        for col in range(n_examples):
            posterior[np.argmax(weighted[:, col]), col] = 1.0

    return posterior
def _maximization(X, posterior, force_weights=None):
"""Estimate new centers, weights, and concentrations from
Parameters
----------
posterior : array, [n_centers, n_examples]
The posterior matrix from the expectation step.
force_weights : None or array, [n_centers, ]
If None is passed, will estimate weights.
If an array is passed, will use instead of estimating.
Returns
----------
centers (mu) : array, [n_centers x n_features]
weights (alpha) : array, [n_centers, ] (alpha)
concentrations (kappa) : array, [n_centers, ]
"""
n_examples, n_features = X.shape
n_clusters, n_examples = posterior.shape
concentrations = np.zeros((n_clusters,))
centers = np.zeros((n_clusters, n_features))
if force_weights is None:
weights = np.zeros((n_clusters,))
for cc in range(n_clusters):
# update weights (alpha)
if force_weights is None:
weights[cc] = np.mean(posterior[cc, :])
else:
weights = force_weights
# update centers (mu)
X_scaled = X.copy()
if sp.issparse(X):
X_scaled.data *= posterior[cc, :].repeat(np.diff(X_scaled.indptr))
else:
for ee in range(n_examples):
X_scaled[ee, :] *= posterior[cc, ee]
centers[cc, :] = X_scaled.sum(axis=0)
# normalize centers
center_norm = np.linalg.norm(centers[cc, :])
if center_norm > 1e-8:
centers[cc, :] = centers[cc, :] / center_norm
# update concentration (kappa) [TODO: add other kappa approximations]
rbar = center_norm / (n_examples * weights[cc])
concentrations[cc] = rbar * n_features - np.power(rbar, 3.)
if np.abs(rbar - 1.0) < 1e-10:
concentrations[cc] = MAX_CONTENTRATION
else:
concentrations[cc] /= 1. - np.power(rbar, 2.)
# let python know we can free this (good for large dense X)
del X_scaled
return centers, weights, concentrations
def _movMF(
    X,
    n_clusters,
    posterior_type="soft",
    force_weights=None,
    max_iter=300,
    verbose=False,
    init="random-class",
    random_state=None,
    tol=1e-6,
):
    """One EM run of mixture-of-von-Mises-Fisher clustering.

    Implements algorithms (i)/(ii) from "Clustering on the Unit Hypersphere
    using von Mises-Fisher Distributions" by Banerjee, Dhillon, Ghosh, and
    Sra.  Log-vMF evaluation follows the movMF R-package (Hornik & Gruen,
    2014): https://cran.r-project.org/web/packages/movMF/index.html

    TODO: Currently only supports Banerjee et al 2005 approximation of kappa,
    however, there are numerous other approximations see _update_params.

    Parameters
    ----------
    X : array-like or sparse matrix, shape=(n_examples, n_features)
        Input data; rows are assumed to be l2-normalized.
    n_clusters : int
        The number of mixture components (clusters).
    posterior_type : 'soft' or 'hard'
        Type of posterior computed in the expectation step.
    force_weights : None or array [n_clusters, ]
        If None, the algorithm will estimate the weights.
        If an array of weights, algorithm will estimate concentrations and
        centers with given weights.
    max_iter : int, default: 300
        Maximum number of EM iterations for this single run.
    verbose : int, default 0
        Verbosity mode.
    init : string or ndarray
        Center initialization scheme; see _init_unit_centers for options.
    random_state : integer or numpy.RandomState, optional
        Generator used to initialize the centers.
    tol : float, default: 1e-6
        Convergence tolerance on the squared center shift between iterations.

    Returns
    -------
    (centers, weights, concentrations, posterior, labels, inertia)
    """
    random_state = check_random_state(random_state)
    n_examples, n_features = np.shape(X)

    # init centers (mus)
    centers = _init_unit_centers(X, n_clusters, random_state, init)

    # init weights (alphas) to uniform unless caller fixed them
    if force_weights is None:
        weights = np.ones((n_clusters,))
        weights = weights / np.sum(weights)
    else:
        weights = force_weights

    # init concentrations (kappas)
    concentrations = np.ones((n_clusters,))

    if verbose:
        print("Initialization complete")

    for iter in range(max_iter):
        centers_prev = centers.copy()

        # expectation step
        posterior = _expectation(
            X, centers, weights, concentrations, posterior_type=posterior_type
        )

        # maximization step
        centers, weights, concentrations = _maximization(
            X, posterior, force_weights=force_weights
        )

        # check convergence: squared Frobenius norm of the center shift
        tolcheck = squared_norm(centers_prev - centers)
        if tolcheck <= tol:
            if verbose:
                print(
                    "Converged at iteration %d: "
                    "center shift %e within tolerance %e" % (iter, tolcheck, tol)
                )
            break

    # labels come for free via posterior (argmax responsibility per column)
    labels = np.zeros((n_examples,))
    for ee in range(n_examples):
        labels[ee] = np.argmax(posterior[:, ee])

    inertia = _inertia_from_labels(X, centers, labels)

    return centers, weights, concentrations, posterior, labels, inertia
def movMF(
    X,
    n_clusters,
    posterior_type="soft",
    force_weights=None,
    n_init=10,
    n_jobs=1,
    max_iter=300,
    verbose=False,
    init="random-class",
    random_state=None,
    tol=1e-6,
    copy_x=True,
):
    """Wrapper for parallelization of _movMF and running n_init times.

    Runs ``_movMF`` ``n_init`` times (serially when ``n_jobs == 1``,
    otherwise fanned out via joblib) and keeps the run with the lowest
    inertia.

    Returns
    -------
    (best_centers, best_labels, best_inertia, best_weights,
     best_concentrations, best_posterior)

    Raises
    ------
    ValueError
        If ``n_init`` or ``max_iter`` is not positive.
    """
    if n_init <= 0:
        raise ValueError(
            "Invalid number of initializations."
            " n_init=%d must be bigger than zero." % n_init
        )
    random_state = check_random_state(random_state)

    if max_iter <= 0:
        raise ValueError(
            "Number of iterations should be a positive number,"
            " got %d instead" % max_iter
        )

    X = as_float_array(X, copy=copy_x)
    tol = _tolerance(X, tol)

    if hasattr(init, "__array__"):
        init = check_array(init, dtype=X.dtype.type, copy=True)
        _validate_center_shape(X, n_clusters, init)

        # explicit centers make repeated inits redundant
        if n_init != 1:
            warnings.warn(
                "Explicit initial center position passed: "
                "performing only one init in k-means instead of n_init=%d" % n_init,
                RuntimeWarning,
                stacklevel=2,
            )
            n_init = 1

    # defaults
    best_centers = None
    best_labels = None
    best_weights = None
    best_concentrations = None
    best_posterior = None
    best_inertia = None

    if n_jobs == 1:
        # For a single thread, less memory is needed if we just store one set
        # of the best results (as opposed to one set per run per thread).
        for it in range(n_init):
            # cluster on the sphere
            (centers, weights, concentrations, posterior, labels, inertia) = _movMF(
                X,
                n_clusters,
                posterior_type=posterior_type,
                force_weights=force_weights,
                max_iter=max_iter,
                verbose=verbose,
                init=init,
                random_state=random_state,
                tol=tol,
            )

            # determine if these results are the best so far
            if best_inertia is None or inertia < best_inertia:
                best_centers = centers.copy()
                best_labels = labels.copy()
                best_weights = weights.copy()
                best_concentrations = concentrations.copy()
                best_posterior = posterior.copy()
                best_inertia = inertia
    else:
        # parallelisation of movMF runs: each worker must receive its OWN
        # seed.  (BUGFIX: previously the same `random_state` object was
        # pickled into every worker, making all n_init runs identical.)
        seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init)
        results = Parallel(n_jobs=n_jobs, verbose=0)(
            delayed(_movMF)(
                X,
                n_clusters,
                posterior_type=posterior_type,
                force_weights=force_weights,
                max_iter=max_iter,
                verbose=verbose,
                init=init,
                random_state=seed,
                tol=tol,
            )
            for seed in seeds
        )

        # Get results with the lowest inertia
        centers, weights, concentrations, posteriors, labels, inertia = zip(*results)
        best = np.argmin(inertia)
        best_labels = labels[best]
        best_inertia = inertia[best]
        best_centers = centers[best]
        best_concentrations = concentrations[best]
        best_posterior = posteriors[best]
        best_weights = weights[best]

    return (
        best_centers,
        best_labels,
        best_inertia,
        best_weights,
        best_concentrations,
        best_posterior,
    )
class VonMisesFisherMixture(BaseEstimator, ClusterMixin, TransformerMixin):
"""Estimator for Mixture of von Mises Fisher clustering on the unit sphere.
Implements the algorithms (i) and (ii) from
"Clustering on the Unit Hypersphere using von Mises-Fisher Distributions"
by Banerjee, Dhillon, Ghosh, and Sra.
TODO: Currently only supports Banerjee et al 2005 approximation of kappa,
however, there are numerous other approximations see _update_params.
Attribution
----------
Approximation of log-vmf distribution function from movMF R-package.
movMF: An R Package for Fitting Mixtures of von Mises-Fisher Distributions
by Kurt Hornik, Bettina Grun, 2014
Find more at:
https://cran.r-project.org/web/packages/movMF/vignettes/movMF.pdf
https://cran.r-project.org/web/packages/movMF/index.html
Basic sklearn scaffolding from sklearn.cluster.KMeans.
Parameters
----------
n_clusters : int, optional, default: 8
The number of clusters to form as well as the number of
centroids to generate.
posterior_type: 'soft' or 'hard'
Type of posterior computed in exepectation step.
See note about attribute: self.posterior_
force_weights : None or array [n_clusters, ]
If None, the algorithm will estimate the weights.
If an array of weights, algorithm will estimate concentrations and
centers with given weights.
max_iter : int, default: 300
Maximum number of iterations of the k-means algorithm for a
single run.
n_init : int, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
init: (string) one of
random-class [default]: random class assignment & centroid computation
k-means++ : uses sklearn k-means++ initialization algorithm
spherical-k-means : use centroids from one pass of spherical k-means
random : random unit norm vectors
random-orthonormal : random orthonormal vectors
If an ndarray is passed, it should be of shape (n_clusters, n_features)
and gives the initial centers.
tol : float, default: 1e-6
Relative tolerance with regards to inertia to declare convergence
n_jobs : int
The number of jobs to use for the computation. This works by computing
each of the n_init runs in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
verbose : int, default 0
Verbosity mode.
copy_x : boolean, default True
When pre-computing distances it is more numerically accurate to center
the data first. If copy_x is True, then the original data is not
modified. If False, the original data is modified, and put back before
the function returns, but small numerical differences may be introduced
by subtracting and then adding the data mean.
normalize : boolean, default True
Normalize the input to have unnit norm.
Attributes
----------
cluster_centers_ : array, [n_clusters, n_features]
Coordinates of cluster centers
labels_ :
Labels of each point
inertia_ : float
Sum of distances of samples to their closest cluster center.
weights_ : array, [n_clusters,]
Weights of each cluster in vMF distribution (alpha).
concentrations_ : array [n_clusters,]
Concentration parameter for each cluster (kappa).
Larger values correspond to more concentrated clusters.
posterior_ : array, [n_clusters, n_examples]
Each column corresponds to the posterio distribution for and example.
If posterior_type='hard' is used, there will only be one non-zero per
column, its index corresponding to the example's cluster label.
If posterior_type='soft' is used, this matrix will be dense and the
column values correspond to soft clustering weights.
"""
    def __init__(
        self,
        n_clusters=5,
        posterior_type="soft",
        force_weights=None,
        n_init=10,
        n_jobs=1,
        max_iter=300,
        verbose=False,
        init="random-class",
        random_state=None,
        tol=1e-6,
        copy_x=True,
        normalize=True,
    ):
        """Store hyper-parameters; no validation or fitting happens here
        (sklearn convention: all work is deferred to ``fit``)."""
        self.n_clusters = n_clusters
        self.posterior_type = posterior_type
        self.force_weights = force_weights
        self.n_init = n_init
        self.n_jobs = n_jobs
        self.max_iter = max_iter
        self.verbose = verbose
        self.init = init
        self.random_state = random_state
        self.tol = tol
        self.copy_x = copy_x
        self.normalize = normalize
def _check_force_weights(self):
if self.force_weights is None:
return
if len(self.force_weights) != self.n_clusters:
raise ValueError(
(
"len(force_weights)={} but must equal "
"n_clusters={}".format(len(self.force_weights), self.n_clusters)
)
)
def _check_fit_data(self, X):
"""Verify that the number of samples given is larger than k"""
X = check_array(X, accept_sparse="csr", dtype=[np.float64, np.float32])
n_samples, n_features = X.shape
if X.shape[0] < self.n_clusters:
raise ValueError(
"n_samples=%d should be >= n_clusters=%d"
% (X.shape[0], self.n_clusters)
)
for ee in range(n_samples):
if sp.issparse(X):
n = sp.linalg.norm(X[ee, :])
else:
n = np.linalg.norm(X[ee, :])
if np.abs(n - 1.) > 1e-4:
raise ValueError("Data l2-norm must be 1, found {}".format(n))
return X
def _check_test_data(self, X):
X = check_array(X, accept_sparse="csr", dtype=FLOAT_DTYPES, warn_on_dtype=True)
n_samples, n_features = X.shape
expected_n_features = self.cluster_centers_.shape[1]
if not n_features == expected_n_features:
raise ValueError(
"Incorrect number of features. "
"Got %d features, expected %d" % (n_features, expected_n_features)
)
for ee in range(n_samples):
if sp.issparse(X):
n = sp.linalg.norm(X[ee, :])
else:
n = np.linalg.norm(X[ee, :])
if np.abs(n - 1.) > 1e-4:
raise ValueError("Data l2-norm must be 1, found {}".format(n))
return X
    def fit(self, X, y=None):
        """Compute mixture of von Mises Fisher clustering.

        Parameters
        ----------
        X : array-like or sparse matrix, shape=(n_samples, n_features)
            Rows must be unit-normalized unless ``self.normalize`` is True.
        y : Ignored, present for sklearn API compatibility.

        Returns
        -------
        self
        """
        if self.normalize:
            X = normalize(X)

        self._check_force_weights()
        random_state = check_random_state(self.random_state)
        X = self._check_fit_data(X)

        # movMF returns the best of n_init EM runs; unpack straight into
        # the fitted sklearn-style attributes
        (
            self.cluster_centers_,
            self.labels_,
            self.inertia_,
            self.weights_,
            self.concentrations_,
            self.posterior_,
        ) = movMF(
            X,
            self.n_clusters,
            posterior_type=self.posterior_type,
            force_weights=self.force_weights,
            n_init=self.n_init,
            n_jobs=self.n_jobs,
            max_iter=self.max_iter,
            verbose=self.verbose,
            init=self.init,
            random_state=random_state,
            tol=self.tol,
            copy_x=self.copy_x,
        )

        return self
def fit_predict(self, X, y=None):
"""Compute cluster centers and predict cluster index for each sample.
Convenience method; equivalent to calling fit(X) followed by
predict(X).
"""
return self.fit(X).labels_
def fit_transform(self, X, y=None):
"""Compute clustering and transform X to cluster-distance space.
Equivalent to fit(X).transform(X), but more efficiently implemented.
"""
# Currently, this just skips a copy of the data if it is not in
# np.array or CSR format already.
# XXX This skips _check_test_data, which may change the dtype;
# we should refactor the input validation.
return self.fit(X)._transform(X)
def transform(self, X, y=None):
"""Transform X to a cluster-distance space.
In the new space, each dimension is the cosine distance to the cluster
centers. Note that even if X is sparse, the array returned by
`transform` will typically be dense.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data to transform.
Returns
-------
X_new : array, shape [n_samples, k]
X transformed in the new space.
"""
if self.normalize:
X = normalize(X)
check_is_fitted(self, "cluster_centers_")
X = self._check_test_data(X)
return self._transform(X)
    def _transform(self, X):
        """Guts of transform method; no input validation.

        Returns the dense [n_samples, n_clusters] cosine-distance matrix.
        """
        return cosine_distances(X, self.cluster_centers_)
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
In the vector quantization literature, `cluster_centers_` is called
the code book and each value returned by `predict` is the index of
the closest code in the code book.
Note: Does not check that each point is on the sphere.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data to predict.
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
"""
if self.normalize:
X = normalize(X)
check_is_fitted(self, "cluster_centers_")
X = self._check_test_data(X)
return _labels_inertia(X, self.cluster_centers_)[0]
def score(self, X, y=None):
"""Inertia score (sum of all distances to closest cluster).
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data.
Returns
-------
score : float
Larger score is better.
"""
if self.normalize:
X = normalize(X)
check_is_fitted(self, "cluster_centers_")
X = self._check_test_data(X)
return -_labels_inertia(X, self.cluster_centers_)[1]
def log_likelihood(self, X):
check_is_fitted(self, "cluster_centers_")
return _log_likelihood(
X, self.cluster_centers_, self.weights_, self.concentrations_
)
|
jasonlaska/spherecluster | spherecluster/von_mises_fisher_mixture.py | _vmf_normalize | python | def _vmf_normalize(kappa, dim):
num = np.power(kappa, dim / 2. - 1.)
if dim / 2. - 1. < 1e-15:
denom = np.power(2. * np.pi, dim / 2.) * i0(kappa)
else:
denom = np.power(2. * np.pi, dim / 2.) * iv(dim / 2. - 1., kappa)
if np.isinf(num):
raise ValueError("VMF scaling numerator was inf.")
if np.isinf(denom):
raise ValueError("VMF scaling denominator was inf.")
if np.abs(denom) < 1e-15:
raise ValueError("VMF scaling denominator was 0.")
return num / denom | Compute normalization constant using built-in numpy/scipy Bessel
approximations.
Works well on small kappa and mu. | train | https://github.com/jasonlaska/spherecluster/blob/701b0b1909088a56e353b363b2672580d4fe9d93/spherecluster/von_mises_fisher_mixture.py#L66-L88 | null | import warnings
import numpy as np
import scipy.sparse as sp
from scipy.special import iv # modified Bessel function of first kind, I_v
from numpy import i0 # modified Bessel function of first kind order 0, I_0
from scipy.special import logsumexp
from sklearn.base import BaseEstimator, ClusterMixin, TransformerMixin
from sklearn.cluster.k_means_ import _init_centroids, _tolerance, _validate_center_shape
from sklearn.utils.validation import FLOAT_DTYPES
from sklearn.utils.validation import check_is_fitted
from sklearn.utils import check_array, check_random_state, as_float_array
from sklearn.preprocessing import normalize
from sklearn.utils.extmath import squared_norm
from sklearn.metrics.pairwise import cosine_distances
from sklearn.externals.joblib import Parallel, delayed
from . import spherical_kmeans
MAX_CONTENTRATION = 1e10
def _inertia_from_labels(X, centers, labels):
"""Compute inertia with cosine distance using known labels.
"""
n_examples, n_features = X.shape
inertia = np.zeros((n_examples,))
for ee in range(n_examples):
inertia[ee] = 1 - X[ee, :].dot(centers[int(labels[ee]), :].T)
return np.sum(inertia)
def _labels_inertia(X, centers):
"""Compute labels and inertia with cosine distance.
"""
n_examples, n_features = X.shape
n_clusters, n_features = centers.shape
labels = np.zeros((n_examples,))
inertia = np.zeros((n_examples,))
for ee in range(n_examples):
dists = np.zeros((n_clusters,))
for cc in range(n_clusters):
dists[cc] = 1 - X[ee, :].dot(centers[cc, :].T)
labels[ee] = np.argmin(dists)
inertia[ee] = dists[int(labels[ee])]
return labels, np.sum(inertia)
def _vmf_log(X, kappa, mu):
    """Log-density log(vMF(X; kappa, mu)) via direct Bessel evaluation.

    Uses built-in numpy/scipy Bessel approximations through _vmf_normalize;
    numerically adequate only for small kappa / low dimension (see
    _vmf_log_asymptotic for the large-parameter regime).
    """
    dim = X.shape[1]
    density = _vmf_normalize(kappa, dim) * np.exp(kappa * X.dot(mu).T)
    return np.log(density)
def _log_H_asymptotic(nu, kappa):
    """Amos-type upper-bound asymptotic approximation of log(H_nu)(kappa),
    where log(H_nu)(kappa) = int_0^kappa R_nu(t) dt.

    Ported from "lH_asymptotic <-" in the movMF R package; see
    https://cran.r-project.org/web/packages/movMF/index.html
    """
    beta = np.sqrt((nu + 0.5) ** 2)  # i.e. |nu + 0.5|
    cutoff = np.sqrt((3. * nu + 11. / 2.) * (nu + 3. / 2.))
    kappa_l = np.min([kappa, cutoff])
    correction = _S(kappa_l, nu, nu + 2.) - _S(kappa_l, nu + 0.5, beta)
    return _S(kappa, nu + 0.5, beta) + correction
def _S(kappa, alpha, beta):
"""Compute the antiderivative of the Amos-type bound G on the modified
Bessel function ratio.
Note: Handles scalar kappa, alpha, and beta only.
See "S <-" in movMF.R and utility function implementation notes from
https://cran.r-project.org/web/packages/movMF/index.html
"""
kappa = 1. * np.abs(kappa)
alpha = 1. * alpha
beta = 1. * np.abs(beta)
a_plus_b = alpha + beta
u = np.sqrt(kappa ** 2 + beta ** 2)
if alpha == 0:
alpha_scale = 0
else:
alpha_scale = alpha * np.log((alpha + u) / a_plus_b)
return u - beta - alpha_scale
def _vmf_log_asymptotic(X, kappa, mu):
    """Approximate log(f(x|theta)) through the Amos bound,

    log(f(x|theta)) = theta' x - log(H_{d/2-1})(||theta||)

    with theta = kappa * mu and ||theta|| = kappa.  More numerically stable
    than _vmf_log for large kappa and n_features.

    See utility function implementation notes in movMF.R from
    https://cran.r-project.org/web/packages/movMF/index.html
    """
    dim = X.shape[1]
    return kappa * X.dot(mu).T - _log_H_asymptotic(dim / 2. - 1., kappa)
def _log_likelihood(X, centers, weights, concentrations):
    """Normalized posterior responsibilities of each cluster for each example.

    NOTE: despite the name, this returns the softmax-normalized posterior
    matrix, not a scalar log-likelihood.

    Parameters
    ----------
    X : array, shape (n_examples, n_features) or (n_features,)
        A 1-D input is treated as a single example.
    centers : array, [n_clusters, n_features]
    weights : array, [n_clusters, ]
    concentrations : array, [n_clusters, ]

    Returns
    -------
    posterior : array, [n_clusters, n_examples]
    """
    if len(np.shape(X)) != 2:
        X = X.reshape((1, len(X)))

    n_examples, n_features = np.shape(X)
    n_clusters, _ = centers.shape

    # direct Bessel form works up to about 50 dims before becoming unstable
    if n_features <= 50:
        vmf_f = _vmf_log
    else:
        vmf_f = _vmf_log_asymptotic

    f_log = np.zeros((n_clusters, n_examples))
    for cc in range(n_clusters):
        f_log[cc, :] = vmf_f(X, concentrations[cc], centers[cc, :])

    # log prior + log density, then per-column softmax via logsumexp
    # (removed a dead `posterior = np.zeros(...)` that was immediately
    # overwritten by the line below)
    weights_log = np.log(weights)
    posterior = np.tile(weights_log.T, (n_examples, 1)).T + f_log
    for ee in range(n_examples):
        posterior[:, ee] = np.exp(posterior[:, ee] - logsumexp(posterior[:, ee]))

    return posterior
def _init_unit_centers(X, n_clusters, random_state, init):
"""Initializes unit norm centers.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
n_clusters : int, optional, default: 8
The number of clusters to form as well as the number of
centroids to generate.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
init: (string) one of
k-means++ : uses sklearn k-means++ initialization algorithm
spherical-k-means : use centroids from one pass of spherical k-means
random : random unit norm vectors
random-orthonormal : random orthonormal vectors
If an ndarray is passed, it should be of shape (n_clusters, n_features)
and gives the initial centers.
"""
n_examples, n_features = np.shape(X)
if isinstance(init, np.ndarray):
n_init_clusters, n_init_features = init.shape
assert n_init_clusters == n_clusters
assert n_init_features == n_features
# ensure unit normed centers
centers = init
for cc in range(n_clusters):
centers[cc, :] = centers[cc, :] / np.linalg.norm(centers[cc, :])
return centers
elif init == "spherical-k-means":
labels, inertia, centers, iters = spherical_kmeans._spherical_kmeans_single_lloyd(
X, n_clusters, x_squared_norms=np.ones((n_examples,)), init="k-means++"
)
return centers
elif init == "random":
centers = np.random.randn(n_clusters, n_features)
for cc in range(n_clusters):
centers[cc, :] = centers[cc, :] / np.linalg.norm(centers[cc, :])
return centers
elif init == "k-means++":
centers = _init_centroids(
X,
n_clusters,
"k-means++",
random_state=random_state,
x_squared_norms=np.ones((n_examples,)),
)
for cc in range(n_clusters):
centers[cc, :] = centers[cc, :] / np.linalg.norm(centers[cc, :])
return centers
elif init == "random-orthonormal":
centers = np.random.randn(n_clusters, n_features)
q, r = np.linalg.qr(centers.T, mode="reduced")
return q.T
elif init == "random-class":
centers = np.zeros((n_clusters, n_features))
for cc in range(n_clusters):
while np.linalg.norm(centers[cc, :]) == 0:
labels = np.random.randint(0, n_clusters, n_examples)
centers[cc, :] = X[labels == cc, :].sum(axis=0)
for cc in range(n_clusters):
centers[cc, :] = centers[cc, :] / np.linalg.norm(centers[cc, :])
return centers
def _expectation(X, centers, weights, concentrations, posterior_type="soft"):
    """E-step: cluster responsibilities (log-likelihood based) per example.

    Parameters
    ----------
    centers (mu) : array, [n_centers x n_features]
    weights (alpha) : array, [n_centers, ]
    concentrations (kappa) : array, [n_centers, ]
    posterior_type : 'soft' (softmax responsibilities) or 'hard' (one-hot)

    Returns
    ----------
    posterior : array, [n_centers, n_examples]
    """
    n_samples, dim = np.shape(X)
    n_centers = centers.shape[0]

    # the direct Bessel form loses precision beyond ~50 dimensions
    log_density = _vmf_log if dim <= 50 else _vmf_log_asymptotic

    f_log = np.zeros((n_centers, n_samples))
    for k in range(n_centers):
        f_log[k, :] = log_density(X, concentrations[k], centers[k, :])

    posterior = np.zeros((n_centers, n_samples))
    if posterior_type == "soft":
        scored = np.tile(np.log(weights).T, (n_samples, 1)).T + f_log
        posterior = scored
        for col in range(n_samples):
            posterior[:, col] = np.exp(
                posterior[:, col] - logsumexp(posterior[:, col])
            )

    elif posterior_type == "hard":
        scored = np.tile(np.log(weights).T, (n_samples, 1)).T + f_log
        for col in range(n_samples):
            posterior[np.argmax(scored[:, col]), col] = 1.0

    return posterior
def _maximization(X, posterior, force_weights=None):
    """Estimate new centers, weights, and concentrations from the posterior.

    Parameters
    ----------
    X : array-like or sparse matrix, shape=(n_examples, n_features)
        Input data; rows are assumed to be l2-normalized.
    posterior : array, [n_centers, n_examples]
        The posterior matrix from the expectation step.
    force_weights : None or array, [n_centers, ]
        If None is passed, will estimate weights.
        If an array is passed, will use instead of estimating.

    Returns
    ----------
    centers (mu) : array, [n_centers x n_features]
    weights (alpha) : array, [n_centers, ] (alpha)
    concentrations (kappa) : array, [n_centers, ]
    """
    n_examples, n_features = X.shape
    n_clusters, n_examples = posterior.shape
    concentrations = np.zeros((n_clusters,))
    centers = np.zeros((n_clusters, n_features))
    if force_weights is None:
        weights = np.zeros((n_clusters,))

    for cc in range(n_clusters):
        # update weights (alpha)
        if force_weights is None:
            weights[cc] = np.mean(posterior[cc, :])
        else:
            weights = force_weights

        # update centers (mu): posterior-weighted sum of all examples
        X_scaled = X.copy()
        if sp.issparse(X):
            # scale each CSR row by its posterior weight without densifying;
            # repeat() expands the per-row weight across that row's nonzeros
            X_scaled.data *= posterior[cc, :].repeat(np.diff(X_scaled.indptr))
        else:
            for ee in range(n_examples):
                X_scaled[ee, :] *= posterior[cc, ee]
        centers[cc, :] = X_scaled.sum(axis=0)

        # normalize centers (skip near-zero sums to avoid division blow-up)
        center_norm = np.linalg.norm(centers[cc, :])
        if center_norm > 1e-8:
            centers[cc, :] = centers[cc, :] / center_norm

        # update concentration (kappa) via the Banerjee et al. (2005)
        # approximation [TODO: add other kappa approximations]
        rbar = center_norm / (n_examples * weights[cc])
        concentrations[cc] = rbar * n_features - np.power(rbar, 3.)
        if np.abs(rbar - 1.0) < 1e-10:
            # rbar -> 1 makes the denominator vanish; clamp kappa instead
            concentrations[cc] = MAX_CONTENTRATION
        else:
            concentrations[cc] /= 1. - np.power(rbar, 2.)

        # let python know we can free this (good for large dense X)
        del X_scaled

    return centers, weights, concentrations
def _movMF(
    X,
    n_clusters,
    posterior_type="soft",
    force_weights=None,
    max_iter=300,
    verbose=False,
    init="random-class",
    random_state=None,
    tol=1e-6,
):
    """One EM run of mixture-of-von-Mises-Fisher clustering.

    Implements algorithms (i)/(ii) from "Clustering on the Unit Hypersphere
    using von Mises-Fisher Distributions" by Banerjee, Dhillon, Ghosh, and
    Sra.  Log-vMF evaluation follows the movMF R-package (Hornik & Gruen,
    2014): https://cran.r-project.org/web/packages/movMF/index.html

    TODO: Currently only supports Banerjee et al 2005 approximation of kappa,
    however, there are numerous other approximations see _update_params.

    Parameters
    ----------
    X : array-like or sparse matrix, shape=(n_examples, n_features)
        Input data; rows are assumed to be l2-normalized.
    n_clusters : int
        The number of mixture components (clusters).
    posterior_type : 'soft' or 'hard'
        Type of posterior computed in the expectation step.
    force_weights : None or array [n_clusters, ]
        If None, the algorithm will estimate the weights.
        If an array of weights, algorithm will estimate concentrations and
        centers with given weights.
    max_iter : int, default: 300
        Maximum number of EM iterations for this single run.
    verbose : int, default 0
        Verbosity mode.
    init : string or ndarray
        Center initialization scheme; see _init_unit_centers for options.
    random_state : integer or numpy.RandomState, optional
        Generator used to initialize the centers.
    tol : float, default: 1e-6
        Convergence tolerance on the squared center shift between iterations.

    Returns
    -------
    (centers, weights, concentrations, posterior, labels, inertia)
    """
    random_state = check_random_state(random_state)
    n_examples, n_features = np.shape(X)

    # init centers (mus)
    centers = _init_unit_centers(X, n_clusters, random_state, init)

    # init weights (alphas) to uniform unless caller fixed them
    if force_weights is None:
        weights = np.ones((n_clusters,))
        weights = weights / np.sum(weights)
    else:
        weights = force_weights

    # init concentrations (kappas)
    concentrations = np.ones((n_clusters,))

    if verbose:
        print("Initialization complete")

    for iter in range(max_iter):
        centers_prev = centers.copy()

        # expectation step
        posterior = _expectation(
            X, centers, weights, concentrations, posterior_type=posterior_type
        )

        # maximization step
        centers, weights, concentrations = _maximization(
            X, posterior, force_weights=force_weights
        )

        # check convergence: squared Frobenius norm of the center shift
        tolcheck = squared_norm(centers_prev - centers)
        if tolcheck <= tol:
            if verbose:
                print(
                    "Converged at iteration %d: "
                    "center shift %e within tolerance %e" % (iter, tolcheck, tol)
                )
            break

    # labels come for free via posterior (argmax responsibility per column)
    labels = np.zeros((n_examples,))
    for ee in range(n_examples):
        labels[ee] = np.argmax(posterior[:, ee])

    inertia = _inertia_from_labels(X, centers, labels)

    return centers, weights, concentrations, posterior, labels, inertia
def movMF(
    X,
    n_clusters,
    posterior_type="soft",
    force_weights=None,
    n_init=10,
    n_jobs=1,
    max_iter=300,
    verbose=False,
    init="random-class",
    random_state=None,
    tol=1e-6,
    copy_x=True,
):
    """Wrapper for parallelization of _movMF and running n_init times.

    Runs ``_movMF`` ``n_init`` times (serially when ``n_jobs == 1``,
    otherwise fanned out via joblib) and keeps the run with the lowest
    inertia.

    Returns
    -------
    (best_centers, best_labels, best_inertia, best_weights,
     best_concentrations, best_posterior)

    Raises
    ------
    ValueError
        If ``n_init`` or ``max_iter`` is not positive.
    """
    if n_init <= 0:
        raise ValueError(
            "Invalid number of initializations."
            " n_init=%d must be bigger than zero." % n_init
        )
    random_state = check_random_state(random_state)

    if max_iter <= 0:
        raise ValueError(
            "Number of iterations should be a positive number,"
            " got %d instead" % max_iter
        )

    X = as_float_array(X, copy=copy_x)
    tol = _tolerance(X, tol)

    if hasattr(init, "__array__"):
        init = check_array(init, dtype=X.dtype.type, copy=True)
        _validate_center_shape(X, n_clusters, init)

        # explicit centers make repeated inits redundant
        if n_init != 1:
            warnings.warn(
                "Explicit initial center position passed: "
                "performing only one init in k-means instead of n_init=%d" % n_init,
                RuntimeWarning,
                stacklevel=2,
            )
            n_init = 1

    # defaults
    best_centers = None
    best_labels = None
    best_weights = None
    best_concentrations = None
    best_posterior = None
    best_inertia = None

    if n_jobs == 1:
        # For a single thread, less memory is needed if we just store one set
        # of the best results (as opposed to one set per run per thread).
        for it in range(n_init):
            # cluster on the sphere
            (centers, weights, concentrations, posterior, labels, inertia) = _movMF(
                X,
                n_clusters,
                posterior_type=posterior_type,
                force_weights=force_weights,
                max_iter=max_iter,
                verbose=verbose,
                init=init,
                random_state=random_state,
                tol=tol,
            )

            # determine if these results are the best so far
            if best_inertia is None or inertia < best_inertia:
                best_centers = centers.copy()
                best_labels = labels.copy()
                best_weights = weights.copy()
                best_concentrations = concentrations.copy()
                best_posterior = posterior.copy()
                best_inertia = inertia
    else:
        # parallelisation of movMF runs: each worker must receive its OWN
        # seed.  (BUGFIX: previously the same `random_state` object was
        # pickled into every worker, making all n_init runs identical.)
        seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init)
        results = Parallel(n_jobs=n_jobs, verbose=0)(
            delayed(_movMF)(
                X,
                n_clusters,
                posterior_type=posterior_type,
                force_weights=force_weights,
                max_iter=max_iter,
                verbose=verbose,
                init=init,
                random_state=seed,
                tol=tol,
            )
            for seed in seeds
        )

        # Get results with the lowest inertia
        centers, weights, concentrations, posteriors, labels, inertia = zip(*results)
        best = np.argmin(inertia)
        best_labels = labels[best]
        best_inertia = inertia[best]
        best_centers = centers[best]
        best_concentrations = concentrations[best]
        best_posterior = posteriors[best]
        best_weights = weights[best]

    return (
        best_centers,
        best_labels,
        best_inertia,
        best_weights,
        best_concentrations,
        best_posterior,
    )
class VonMisesFisherMixture(BaseEstimator, ClusterMixin, TransformerMixin):
    """Estimator for Mixture of von Mises Fisher clustering on the unit sphere.

    Implements the algorithms (i) and (ii) from
    "Clustering on the Unit Hypersphere using von Mises-Fisher Distributions"
    by Banerjee, Dhillon, Ghosh, and Sra.

    TODO: Currently only supports Banerjee et al 2005 approximation of kappa,
    however, there are numerous other approximations see _update_params.

    Attribution
    ----------
    Approximation of log-vmf distribution function from movMF R-package.

    movMF: An R Package for Fitting Mixtures of von Mises-Fisher Distributions
    by Kurt Hornik, Bettina Grun, 2014

    Find more at:
        https://cran.r-project.org/web/packages/movMF/vignettes/movMF.pdf
        https://cran.r-project.org/web/packages/movMF/index.html

    Basic sklearn scaffolding from sklearn.cluster.KMeans.

    Parameters
    ----------
    n_clusters : int, optional, default: 5
        The number of clusters to form as well as the number of
        centroids to generate.

    posterior_type : 'soft' or 'hard'
        Type of posterior computed in expectation step.
        See note about attribute: self.posterior_

    force_weights : None or array [n_clusters, ]
        If None, the algorithm will estimate the weights.
        If an array of weights, algorithm will estimate concentrations and
        centers with given weights.

    max_iter : int, default: 300
        Maximum number of iterations of the k-means algorithm for a
        single run.

    n_init : int, default: 10
        Number of time the k-means algorithm will be run with different
        centroid seeds. The final results will be the best output of
        n_init consecutive runs in terms of inertia.

    init : (string) one of
        random-class [default]: random class assignment & centroid computation
        k-means++ : uses sklearn k-means++ initialization algorithm
        spherical-k-means : use centroids from one pass of spherical k-means
        random : random unit norm vectors
        random-orthonormal : random orthonormal vectors
        If an ndarray is passed, it should be of shape (n_clusters, n_features)
        and gives the initial centers.

    tol : float, default: 1e-6
        Relative tolerance with regards to inertia to declare convergence

    n_jobs : int
        The number of jobs to use for the computation. This works by computing
        each of the n_init runs in parallel.
        If -1 all CPUs are used. If 1 is given, no parallel computing code is
        used at all, which is useful for debugging. For n_jobs below -1,
        (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
        are used.

    random_state : integer or numpy.RandomState, optional
        The generator used to initialize the centers. If an integer is
        given, it fixes the seed. Defaults to the global numpy random
        number generator.

    verbose : int, default 0
        Verbosity mode.

    copy_x : boolean, default True
        When pre-computing distances it is more numerically accurate to center
        the data first. If copy_x is True, then the original data is not
        modified. If False, the original data is modified, and put back before
        the function returns, but small numerical differences may be introduced
        by subtracting and then adding the data mean.

    normalize : boolean, default True
        Normalize the input to have unit norm.

    Attributes
    ----------
    cluster_centers_ : array, [n_clusters, n_features]
        Coordinates of cluster centers

    labels_ :
        Labels of each point

    inertia_ : float
        Sum of distances of samples to their closest cluster center.

    weights_ : array, [n_clusters,]
        Weights of each cluster in vMF distribution (alpha).

    concentrations_ : array [n_clusters,]
        Concentration parameter for each cluster (kappa).
        Larger values correspond to more concentrated clusters.

    posterior_ : array, [n_clusters, n_examples]
        Each column corresponds to the posterior distribution for an example.

        If posterior_type='hard' is used, there will only be one non-zero per
        column, its index corresponding to the example's cluster label.

        If posterior_type='soft' is used, this matrix will be dense and the
        column values correspond to soft clustering weights.
    """

    def __init__(
        self,
        n_clusters=5,
        posterior_type="soft",
        force_weights=None,
        n_init=10,
        n_jobs=1,
        max_iter=300,
        verbose=False,
        init="random-class",
        random_state=None,
        tol=1e-6,
        copy_x=True,
        normalize=True,
    ):
        # sklearn convention: __init__ only stores hyperparameters; all
        # validation and computation happen in fit().
        self.n_clusters = n_clusters
        self.posterior_type = posterior_type
        self.force_weights = force_weights
        self.n_init = n_init
        self.n_jobs = n_jobs
        self.max_iter = max_iter
        self.verbose = verbose
        self.init = init
        self.random_state = random_state
        self.tol = tol
        self.copy_x = copy_x
        self.normalize = normalize

    def _check_force_weights(self):
        """Validate user-supplied mixture weights; no-op when None (estimated)."""
        if self.force_weights is None:
            return

        if len(self.force_weights) != self.n_clusters:
            raise ValueError(
                (
                    "len(force_weights)={} but must equal "
                    "n_clusters={}".format(len(self.force_weights), self.n_clusters)
                )
            )

    def _check_fit_data(self, X):
        """Verify that the number of samples given is larger than k"""
        X = check_array(X, accept_sparse="csr", dtype=[np.float64, np.float32])
        n_samples, n_features = X.shape
        if X.shape[0] < self.n_clusters:
            raise ValueError(
                "n_samples=%d should be >= n_clusters=%d"
                % (X.shape[0], self.n_clusters)
            )

        # vMF is defined on the unit hypersphere: every row must be unit-normed.
        for ee in range(n_samples):
            if sp.issparse(X):
                n = sp.linalg.norm(X[ee, :])
            else:
                n = np.linalg.norm(X[ee, :])

            if np.abs(n - 1.) > 1e-4:
                raise ValueError("Data l2-norm must be 1, found {}".format(n))

        return X

    def _check_test_data(self, X):
        """Validate X against the fitted model: dtype, feature count, unit rows."""
        X = check_array(X, accept_sparse="csr", dtype=FLOAT_DTYPES, warn_on_dtype=True)
        n_samples, n_features = X.shape
        expected_n_features = self.cluster_centers_.shape[1]
        if not n_features == expected_n_features:
            raise ValueError(
                "Incorrect number of features. "
                "Got %d features, expected %d" % (n_features, expected_n_features)
            )

        for ee in range(n_samples):
            if sp.issparse(X):
                n = sp.linalg.norm(X[ee, :])
            else:
                n = np.linalg.norm(X[ee, :])

            if np.abs(n - 1.) > 1e-4:
                raise ValueError("Data l2-norm must be 1, found {}".format(n))

        return X

    def fit(self, X, y=None):
        """Compute mixture of von Mises Fisher clustering.

        Parameters
        ----------
        X : array-like or sparse matrix, shape=(n_samples, n_features)

        y : Ignored
            Present for sklearn estimator API compatibility.

        Returns
        -------
        self
        """
        if self.normalize:
            X = normalize(X)

        self._check_force_weights()
        random_state = check_random_state(self.random_state)
        X = self._check_fit_data(X)

        (
            self.cluster_centers_,
            self.labels_,
            self.inertia_,
            self.weights_,
            self.concentrations_,
            self.posterior_,
        ) = movMF(
            X,
            self.n_clusters,
            posterior_type=self.posterior_type,
            force_weights=self.force_weights,
            n_init=self.n_init,
            n_jobs=self.n_jobs,
            max_iter=self.max_iter,
            verbose=self.verbose,
            init=self.init,
            random_state=random_state,
            tol=self.tol,
            copy_x=self.copy_x,
        )

        return self

    def fit_predict(self, X, y=None):
        """Compute cluster centers and predict cluster index for each sample.

        Convenience method; equivalent to calling fit(X) followed by
        predict(X).
        """
        return self.fit(X).labels_

    def fit_transform(self, X, y=None):
        """Compute clustering and transform X to cluster-distance space.

        Equivalent to fit(X).transform(X), but more efficiently implemented.
        """
        # Currently, this just skips a copy of the data if it is not in
        # np.array or CSR format already.
        # XXX This skips _check_test_data, which may change the dtype;
        # we should refactor the input validation.
        return self.fit(X)._transform(X)

    def transform(self, X, y=None):
        """Transform X to a cluster-distance space.

        In the new space, each dimension is the cosine distance to the cluster
        centers. Note that even if X is sparse, the array returned by
        `transform` will typically be dense.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            New data to transform.

        Returns
        -------
        X_new : array, shape [n_samples, k]
            X transformed in the new space.
        """
        if self.normalize:
            X = normalize(X)

        check_is_fitted(self, "cluster_centers_")
        X = self._check_test_data(X)
        return self._transform(X)

    def _transform(self, X):
        """guts of transform method; no input validation"""
        return cosine_distances(X, self.cluster_centers_)

    def predict(self, X):
        """Predict the closest cluster each sample in X belongs to.

        In the vector quantization literature, `cluster_centers_` is called
        the code book and each value returned by `predict` is the index of
        the closest code in the code book.

        Note: Does not check that each point is on the sphere.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            New data to predict.

        Returns
        -------
        labels : array, shape [n_samples,]
            Index of the cluster each sample belongs to.
        """
        if self.normalize:
            X = normalize(X)

        check_is_fitted(self, "cluster_centers_")

        X = self._check_test_data(X)
        return _labels_inertia(X, self.cluster_centers_)[0]

    def score(self, X, y=None):
        """Inertia score (sum of all distances to closest cluster).

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            New data.

        Returns
        -------
        score : float
            Larger score is better.
        """
        if self.normalize:
            X = normalize(X)

        check_is_fitted(self, "cluster_centers_")
        X = self._check_test_data(X)
        # negated so that "larger is better", per the sklearn score convention
        return -_labels_inertia(X, self.cluster_centers_)[1]

    def log_likelihood(self, X):
        # Posterior responsibilities of the fitted mixture for each row of X.
        check_is_fitted(self, "cluster_centers_")

        return _log_likelihood(
            X, self.cluster_centers_, self.weights_, self.concentrations_
        )
|
jasonlaska/spherecluster | spherecluster/von_mises_fisher_mixture.py | _log_H_asymptotic | python | def _log_H_asymptotic(nu, kappa):
beta = np.sqrt((nu + 0.5) ** 2)
kappa_l = np.min([kappa, np.sqrt((3. * nu + 11. / 2.) * (nu + 3. / 2.))])
return _S(kappa, nu + 0.5, beta) + (
_S(kappa_l, nu, nu + 2.) - _S(kappa_l, nu + 0.5, beta)
) | Compute the Amos-type upper bound asymptotic approximation on H where
log(H_\nu)(\kappa) = \int_0^\kappa R_\nu(t) dt.
See "lH_asymptotic <-" in movMF.R and utility function implementation notes
from https://cran.r-project.org/web/packages/movMF/index.html | train | https://github.com/jasonlaska/spherecluster/blob/701b0b1909088a56e353b363b2672580d4fe9d93/spherecluster/von_mises_fisher_mixture.py#L91-L102 | null | import warnings
import numpy as np
import scipy.sparse as sp
from scipy.special import iv # modified Bessel function of first kind, I_v
from numpy import i0 # modified Bessel function of first kind order 0, I_0
from scipy.special import logsumexp
from sklearn.base import BaseEstimator, ClusterMixin, TransformerMixin
from sklearn.cluster.k_means_ import _init_centroids, _tolerance, _validate_center_shape
from sklearn.utils.validation import FLOAT_DTYPES
from sklearn.utils.validation import check_is_fitted
from sklearn.utils import check_array, check_random_state, as_float_array
from sklearn.preprocessing import normalize
from sklearn.utils.extmath import squared_norm
from sklearn.metrics.pairwise import cosine_distances
from sklearn.externals.joblib import Parallel, delayed
from . import spherical_kmeans
MAX_CONTENTRATION = 1e10
def _inertia_from_labels(X, centers, labels):
"""Compute inertia with cosine distance using known labels.
"""
n_examples, n_features = X.shape
inertia = np.zeros((n_examples,))
for ee in range(n_examples):
inertia[ee] = 1 - X[ee, :].dot(centers[int(labels[ee]), :].T)
return np.sum(inertia)
def _labels_inertia(X, centers):
"""Compute labels and inertia with cosine distance.
"""
n_examples, n_features = X.shape
n_clusters, n_features = centers.shape
labels = np.zeros((n_examples,))
inertia = np.zeros((n_examples,))
for ee in range(n_examples):
dists = np.zeros((n_clusters,))
for cc in range(n_clusters):
dists[cc] = 1 - X[ee, :].dot(centers[cc, :].T)
labels[ee] = np.argmin(dists)
inertia[ee] = dists[int(labels[ee])]
return labels, np.sum(inertia)
def _vmf_log(X, kappa, mu):
    """Computes log(vMF(X, kappa, mu)) using built-in numpy/scipy Bessel
    approximations.

    Works well on small kappa and mu.

    BUG FIX: previously computed log(C * exp(kappa * <x, mu>)), which
    overflows to inf inside np.exp once kappa * <x, mu> exceeds ~709.
    Computing log(C) + kappa * <x, mu> is mathematically identical and
    never forms the exponential.
    """
    n_examples, n_features = X.shape
    return np.log(_vmf_normalize(kappa, n_features)) + kappa * X.dot(mu).T
def _vmf_normalize(kappa, dim):
"""Compute normalization constant using built-in numpy/scipy Bessel
approximations.
Works well on small kappa and mu.
"""
num = np.power(kappa, dim / 2. - 1.)
if dim / 2. - 1. < 1e-15:
denom = np.power(2. * np.pi, dim / 2.) * i0(kappa)
else:
denom = np.power(2. * np.pi, dim / 2.) * iv(dim / 2. - 1., kappa)
if np.isinf(num):
raise ValueError("VMF scaling numerator was inf.")
if np.isinf(denom):
raise ValueError("VMF scaling denominator was inf.")
if np.abs(denom) < 1e-15:
raise ValueError("VMF scaling denominator was 0.")
return num / denom
def _S(kappa, alpha, beta):
"""Compute the antiderivative of the Amos-type bound G on the modified
Bessel function ratio.
Note: Handles scalar kappa, alpha, and beta only.
See "S <-" in movMF.R and utility function implementation notes from
https://cran.r-project.org/web/packages/movMF/index.html
"""
kappa = 1. * np.abs(kappa)
alpha = 1. * alpha
beta = 1. * np.abs(beta)
a_plus_b = alpha + beta
u = np.sqrt(kappa ** 2 + beta ** 2)
if alpha == 0:
alpha_scale = 0
else:
alpha_scale = alpha * np.log((alpha + u) / a_plus_b)
return u - beta - alpha_scale
def _vmf_log_asymptotic(X, kappa, mu):
    """Compute log(f(x|theta)) via the Amos approximation

        log(f(x|theta)) = theta' x - log(H_{d/2-1})(||theta||)

    where theta = kappa * mu and ||theta|| = kappa. More numerically stable
    than _vmf_log for large kappa and n_features.

    See utility function implementation notes in movMF.R from
    https://cran.r-project.org/web/packages/movMF/index.html
    """
    _, n_features = X.shape
    log_norm = _log_H_asymptotic(n_features / 2. - 1., kappa)
    return kappa * X.dot(mu).T - log_norm
def _log_likelihood(X, centers, weights, concentrations):
    """Posterior cluster responsibilities for each example.

    Returns an array [n_clusters, n_examples] whose columns sum to 1.
    """
    # Promote a single example to a 1-row matrix.
    if len(np.shape(X)) != 2:
        X = X.reshape((1, len(X)))

    n_examples, n_features = np.shape(X)
    n_clusters = centers.shape[0]

    # The exact Bessel-based density is numerically stable only in low dim.
    density = _vmf_log if n_features <= 50 else _vmf_log_asymptotic

    f_log = np.zeros((n_clusters, n_examples))
    for cluster in range(n_clusters):
        f_log[cluster, :] = density(X, concentrations[cluster], centers[cluster, :])

    # Combine log-prior and log-density, then normalize each column in
    # log-space before exponentiating.
    posterior = np.tile(np.log(weights).T, (n_examples, 1)).T + f_log
    for col in range(n_examples):
        posterior[:, col] = np.exp(posterior[:, col] - logsumexp(posterior[:, col]))

    return posterior
def _init_unit_centers(X, n_clusters, random_state, init):
"""Initializes unit norm centers.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
n_clusters : int, optional, default: 8
The number of clusters to form as well as the number of
centroids to generate.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
init: (string) one of
k-means++ : uses sklearn k-means++ initialization algorithm
spherical-k-means : use centroids from one pass of spherical k-means
random : random unit norm vectors
random-orthonormal : random orthonormal vectors
If an ndarray is passed, it should be of shape (n_clusters, n_features)
and gives the initial centers.
"""
n_examples, n_features = np.shape(X)
if isinstance(init, np.ndarray):
n_init_clusters, n_init_features = init.shape
assert n_init_clusters == n_clusters
assert n_init_features == n_features
# ensure unit normed centers
centers = init
for cc in range(n_clusters):
centers[cc, :] = centers[cc, :] / np.linalg.norm(centers[cc, :])
return centers
elif init == "spherical-k-means":
labels, inertia, centers, iters = spherical_kmeans._spherical_kmeans_single_lloyd(
X, n_clusters, x_squared_norms=np.ones((n_examples,)), init="k-means++"
)
return centers
elif init == "random":
centers = np.random.randn(n_clusters, n_features)
for cc in range(n_clusters):
centers[cc, :] = centers[cc, :] / np.linalg.norm(centers[cc, :])
return centers
elif init == "k-means++":
centers = _init_centroids(
X,
n_clusters,
"k-means++",
random_state=random_state,
x_squared_norms=np.ones((n_examples,)),
)
for cc in range(n_clusters):
centers[cc, :] = centers[cc, :] / np.linalg.norm(centers[cc, :])
return centers
elif init == "random-orthonormal":
centers = np.random.randn(n_clusters, n_features)
q, r = np.linalg.qr(centers.T, mode="reduced")
return q.T
elif init == "random-class":
centers = np.zeros((n_clusters, n_features))
for cc in range(n_clusters):
while np.linalg.norm(centers[cc, :]) == 0:
labels = np.random.randint(0, n_clusters, n_examples)
centers[cc, :] = X[labels == cc, :].sum(axis=0)
for cc in range(n_clusters):
centers[cc, :] = centers[cc, :] / np.linalg.norm(centers[cc, :])
return centers
def _expectation(X, centers, weights, concentrations, posterior_type="soft"):
    """Compute the log-likelihood of each datapoint being in each cluster.

    Parameters
    ----------
    centers (mu) : array, [n_centers x n_features]
    weights (alpha) : array, [n_centers, ] (alpha)
    concentrations (kappa) : array, [n_centers, ]

    Returns
    ----------
    posterior : array, [n_centers, n_examples]
        'soft': columns are normalized responsibilities (sum to 1).
        'hard': one-hot columns marking the most likely cluster.
    """
    n_examples, n_features = np.shape(X)
    n_clusters, _ = centers.shape

    if n_features <= 50:  # works up to about 50 before numerically unstable
        vmf_f = _vmf_log
    else:
        vmf_f = _vmf_log_asymptotic

    # Per-cluster log-density of every example.
    f_log = np.zeros((n_clusters, n_examples))
    for cc in range(n_clusters):
        f_log[cc, :] = vmf_f(X, concentrations[cc], centers[cc, :])

    posterior = np.zeros((n_clusters, n_examples))
    if posterior_type == "soft":
        weights_log = np.log(weights)
        posterior = np.tile(weights_log.T, (n_examples, 1)).T + f_log
        # Normalize each column in log-space (logsumexp) before exponentiating.
        for ee in range(n_examples):
            posterior[:, ee] = np.exp(posterior[:, ee] - logsumexp(posterior[:, ee]))

    elif posterior_type == "hard":
        weights_log = np.log(weights)
        weighted_f_log = np.tile(weights_log.T, (n_examples, 1)).T + f_log
        for ee in range(n_examples):
            posterior[np.argmax(weighted_f_log[:, ee]), ee] = 1.0
    # NOTE(review): any other posterior_type silently returns the zero matrix;
    # consider raising ValueError for unknown values.

    return posterior
def _maximization(X, posterior, force_weights=None):
    """Estimate new centers, weights, and concentrations from the posterior.

    Parameters
    ----------
    posterior : array, [n_centers, n_examples]
        The posterior matrix from the expectation step.

    force_weights : None or array, [n_centers, ]
        If None is passed, will estimate weights.
        If an array is passed, will use instead of estimating.

    Returns
    ----------
    centers (mu) : array, [n_centers x n_features]
    weights (alpha) : array, [n_centers, ] (alpha)
    concentrations (kappa) : array, [n_centers, ]
    """
    n_examples, n_features = X.shape
    n_clusters, n_examples = posterior.shape
    concentrations = np.zeros((n_clusters,))
    centers = np.zeros((n_clusters, n_features))
    if force_weights is None:
        weights = np.zeros((n_clusters,))

    for cc in range(n_clusters):
        # update weights (alpha)
        if force_weights is None:
            weights[cc] = np.mean(posterior[cc, :])
        else:
            weights = force_weights

        # update centers (mu): weight each example by its responsibility,
        # then sum. Sparse CSR rows are scaled in place via the data buffer.
        X_scaled = X.copy()
        if sp.issparse(X):
            X_scaled.data *= posterior[cc, :].repeat(np.diff(X_scaled.indptr))
        else:
            for ee in range(n_examples):
                X_scaled[ee, :] *= posterior[cc, ee]

        centers[cc, :] = X_scaled.sum(axis=0)

        # normalize centers
        center_norm = np.linalg.norm(centers[cc, :])
        if center_norm > 1e-8:
            centers[cc, :] = centers[cc, :] / center_norm

        # update concentration (kappa) [TODO: add other kappa approximations]
        # Banerjee et al. 2005 closed-form approximation:
        #   kappa ~= (rbar * d - rbar^3) / (1 - rbar^2)
        # NOTE(review): divides by weights[cc]; a cluster whose weight
        # collapses to 0 would produce a division-by-zero warning — confirm
        # whether upstream ever allows that.
        rbar = center_norm / (n_examples * weights[cc])
        concentrations[cc] = rbar * n_features - np.power(rbar, 3.)
        if np.abs(rbar - 1.0) < 1e-10:
            # rbar -> 1 makes the denominator vanish; clamp kappa instead.
            concentrations[cc] = MAX_CONTENTRATION
        else:
            concentrations[cc] /= 1. - np.power(rbar, 2.)

        # let python know we can free this (good for large dense X)
        del X_scaled

    return centers, weights, concentrations
def _movMF(
    X,
    n_clusters,
    posterior_type="soft",
    force_weights=None,
    max_iter=300,
    verbose=False,
    init="random-class",
    random_state=None,
    tol=1e-6,
):
    """Mixture of von Mises Fisher clustering — a single EM run.

    Implements the algorithms (i) and (ii) from
    "Clustering on the Unit Hypersphere using von Mises-Fisher Distributions"
    by Banerjee, Dhillon, Ghosh, and Sra.

    TODO: Currently only supports Banerjee et al 2005 approximation of kappa,
    however, there are numerous other approximations see _update_params.

    Attribution
    ----------
    Approximation of log-vmf distribution function from movMF R-package.

    movMF: An R Package for Fitting Mixtures of von Mises-Fisher Distributions
    by Kurt Hornik, Bettina Grun, 2014

    Find more at:
        https://cran.r-project.org/web/packages/movMF/vignettes/movMF.pdf
        https://cran.r-project.org/web/packages/movMF/index.html

    Parameters
    ----------
    n_clusters : int
        The number of clusters to form as well as the number of
        centroids to generate.

    posterior_type: 'soft' or 'hard'
        Type of posterior computed in the expectation step.

    force_weights : None or array [n_clusters, ]
        If None, the algorithm will estimate the weights.
        If an array of weights, algorithm will estimate concentrations and
        centers with given weights.

    max_iter : int, default: 300
        Maximum number of EM iterations for this run.

    init: (string) one of
        random-class [default]: random class assignment & centroid computation
        k-means++ : uses sklearn k-means++ initialization algorithm
        spherical-k-means : use centroids from one pass of spherical k-means
        random : random unit norm vectors
        random-orthonormal : random orthonormal vectors
        If an ndarray is passed, it should be of shape (n_clusters, n_features)
        and gives the initial centers.

    random_state : integer or numpy.RandomState, optional
        The generator used to initialize the centers. If an integer is
        given, it fixes the seed. Defaults to the global numpy random
        number generator.

    tol : float, default: 1e-6
        Center-shift tolerance (squared norm) to declare convergence.

    verbose : int, default 0
        Verbosity mode.

    Returns
    -------
    (centers, weights, concentrations, posterior, labels, inertia)
    """
    random_state = check_random_state(random_state)
    n_examples, n_features = np.shape(X)

    # init centers (mus)
    centers = _init_unit_centers(X, n_clusters, random_state, init)

    # init weights (alphas): uniform mixture unless forced
    if force_weights is None:
        weights = np.ones((n_clusters,))
        weights = weights / np.sum(weights)
    else:
        weights = force_weights

    # init concentrations (kappas)
    concentrations = np.ones((n_clusters,))

    if verbose:
        print("Initialization complete")

    # NOTE(review): `iter` shadows the builtin; harmless but worth renaming.
    for iter in range(max_iter):
        centers_prev = centers.copy()

        # expectation step
        posterior = _expectation(
            X, centers, weights, concentrations, posterior_type=posterior_type
        )

        # maximization step
        centers, weights, concentrations = _maximization(
            X, posterior, force_weights=force_weights
        )

        # check convergence
        tolcheck = squared_norm(centers_prev - centers)
        if tolcheck <= tol:
            if verbose:
                print(
                    "Converged at iteration %d: "
                    "center shift %e within tolerance %e" % (iter, tolcheck, tol)
                )
            break

    # labels come for free via posterior
    labels = np.zeros((n_examples,))
    for ee in range(n_examples):
        labels[ee] = np.argmax(posterior[:, ee])

    inertia = _inertia_from_labels(X, centers, labels)

    return centers, weights, concentrations, posterior, labels, inertia
def movMF(
    X,
    n_clusters,
    posterior_type="soft",
    force_weights=None,
    n_init=10,
    n_jobs=1,
    max_iter=300,
    verbose=False,
    init="random-class",
    random_state=None,
    tol=1e-6,
    copy_x=True,
):
    """Wrapper for parallelization of _movMF and running n_init times.

    Runs _movMF `n_init` times (serially when n_jobs == 1, otherwise via
    joblib) and returns the run with the lowest inertia as the tuple
    (centers, labels, inertia, weights, concentrations, posterior).

    Raises
    ------
    ValueError
        If n_init <= 0 or max_iter <= 0.
    """
    if n_init <= 0:
        raise ValueError(
            "Invalid number of initializations."
            " n_init=%d must be bigger than zero." % n_init
        )
    random_state = check_random_state(random_state)

    if max_iter <= 0:
        raise ValueError(
            "Number of iterations should be a positive number,"
            " got %d instead" % max_iter
        )

    X = as_float_array(X, copy=copy_x)
    tol = _tolerance(X, tol)

    if hasattr(init, "__array__"):
        init = check_array(init, dtype=X.dtype.type, copy=True)
        _validate_center_shape(X, n_clusters, init)

        # Explicit centers make multiple inits pointless: every run would
        # start from the same place.
        if n_init != 1:
            warnings.warn(
                "Explicit initial center position passed: "
                "performing only one init in k-means instead of n_init=%d" % n_init,
                RuntimeWarning,
                stacklevel=2,
            )
            n_init = 1

    # best run seen so far (BUG FIX: removed dead `best_inertia = np.infty`
    # assignment that was immediately overwritten by None below)
    best_centers = None
    best_labels = None
    best_weights = None
    best_concentrations = None
    best_posterior = None
    best_inertia = None

    if n_jobs == 1:
        # For a single thread, less memory is needed if we just store one set
        # of the best results (as opposed to one set per run per thread).
        for it in range(n_init):
            # cluster on the sphere
            (centers, weights, concentrations, posterior, labels, inertia) = _movMF(
                X,
                n_clusters,
                posterior_type=posterior_type,
                force_weights=force_weights,
                max_iter=max_iter,
                verbose=verbose,
                init=init,
                random_state=random_state,
                tol=tol,
            )

            # determine if these results are the best so far
            if best_inertia is None or inertia < best_inertia:
                best_centers = centers.copy()
                best_labels = labels.copy()
                best_weights = weights.copy()
                best_concentrations = concentrations.copy()
                best_posterior = posterior.copy()
                best_inertia = inertia
    else:
        # parallelisation of movMF runs
        seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init)
        results = Parallel(n_jobs=n_jobs, verbose=0)(
            delayed(_movMF)(
                X,
                n_clusters,
                posterior_type=posterior_type,
                force_weights=force_weights,
                max_iter=max_iter,
                verbose=verbose,
                init=init,
                # BUG FIX: seed each parallel run independently. Previously
                # the shared `random_state` object was passed here and `seed`
                # was never used, so every worker ran an identical init and
                # n_init parallel runs were all the same run.
                random_state=seed,
                tol=tol,
            )
            for seed in seeds
        )

        # Get results with the lowest inertia
        centers, weights, concentrations, posteriors, labels, inertia = zip(*results)
        best = np.argmin(inertia)
        best_labels = labels[best]
        best_inertia = inertia[best]
        best_centers = centers[best]
        best_concentrations = concentrations[best]
        best_posterior = posteriors[best]
        best_weights = weights[best]

    return (
        best_centers,
        best_labels,
        best_inertia,
        best_weights,
        best_concentrations,
        best_posterior,
    )
class VonMisesFisherMixture(BaseEstimator, ClusterMixin, TransformerMixin):
"""Estimator for Mixture of von Mises Fisher clustering on the unit sphere.
Implements the algorithms (i) and (ii) from
"Clustering on the Unit Hypersphere using von Mises-Fisher Distributions"
by Banerjee, Dhillon, Ghosh, and Sra.
TODO: Currently only supports Banerjee et al 2005 approximation of kappa,
however, there are numerous other approximations see _update_params.
Attribution
----------
Approximation of log-vmf distribution function from movMF R-package.
movMF: An R Package for Fitting Mixtures of von Mises-Fisher Distributions
by Kurt Hornik, Bettina Grun, 2014
Find more at:
https://cran.r-project.org/web/packages/movMF/vignettes/movMF.pdf
https://cran.r-project.org/web/packages/movMF/index.html
Basic sklearn scaffolding from sklearn.cluster.KMeans.
Parameters
----------
n_clusters : int, optional, default: 8
The number of clusters to form as well as the number of
centroids to generate.
posterior_type: 'soft' or 'hard'
Type of posterior computed in exepectation step.
See note about attribute: self.posterior_
force_weights : None or array [n_clusters, ]
If None, the algorithm will estimate the weights.
If an array of weights, algorithm will estimate concentrations and
centers with given weights.
max_iter : int, default: 300
Maximum number of iterations of the k-means algorithm for a
single run.
n_init : int, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
init: (string) one of
random-class [default]: random class assignment & centroid computation
k-means++ : uses sklearn k-means++ initialization algorithm
spherical-k-means : use centroids from one pass of spherical k-means
random : random unit norm vectors
random-orthonormal : random orthonormal vectors
If an ndarray is passed, it should be of shape (n_clusters, n_features)
and gives the initial centers.
tol : float, default: 1e-6
Relative tolerance with regards to inertia to declare convergence
n_jobs : int
The number of jobs to use for the computation. This works by computing
each of the n_init runs in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
verbose : int, default 0
Verbosity mode.
copy_x : boolean, default True
When pre-computing distances it is more numerically accurate to center
the data first. If copy_x is True, then the original data is not
modified. If False, the original data is modified, and put back before
the function returns, but small numerical differences may be introduced
by subtracting and then adding the data mean.
normalize : boolean, default True
Normalize the input to have unnit norm.
Attributes
----------
cluster_centers_ : array, [n_clusters, n_features]
Coordinates of cluster centers
labels_ :
Labels of each point
inertia_ : float
Sum of distances of samples to their closest cluster center.
weights_ : array, [n_clusters,]
Weights of each cluster in vMF distribution (alpha).
concentrations_ : array [n_clusters,]
Concentration parameter for each cluster (kappa).
Larger values correspond to more concentrated clusters.
posterior_ : array, [n_clusters, n_examples]
Each column corresponds to the posterio distribution for and example.
If posterior_type='hard' is used, there will only be one non-zero per
column, its index corresponding to the example's cluster label.
If posterior_type='soft' is used, this matrix will be dense and the
column values correspond to soft clustering weights.
"""
    def __init__(
        self,
        n_clusters=5,
        posterior_type="soft",
        force_weights=None,
        n_init=10,
        n_jobs=1,
        max_iter=300,
        verbose=False,
        init="random-class",
        random_state=None,
        tol=1e-6,
        copy_x=True,
        normalize=True,
    ):
        """Store hyperparameters; all validation and fitting happen in fit()."""
        # sklearn convention: __init__ only assigns, never validates.
        self.n_clusters = n_clusters
        self.posterior_type = posterior_type
        self.force_weights = force_weights
        self.n_init = n_init
        self.n_jobs = n_jobs
        self.max_iter = max_iter
        self.verbose = verbose
        self.init = init
        self.random_state = random_state
        self.tol = tol
        self.copy_x = copy_x
        self.normalize = normalize
def _check_force_weights(self):
if self.force_weights is None:
return
if len(self.force_weights) != self.n_clusters:
raise ValueError(
(
"len(force_weights)={} but must equal "
"n_clusters={}".format(len(self.force_weights), self.n_clusters)
)
)
def _check_fit_data(self, X):
"""Verify that the number of samples given is larger than k"""
X = check_array(X, accept_sparse="csr", dtype=[np.float64, np.float32])
n_samples, n_features = X.shape
if X.shape[0] < self.n_clusters:
raise ValueError(
"n_samples=%d should be >= n_clusters=%d"
% (X.shape[0], self.n_clusters)
)
for ee in range(n_samples):
if sp.issparse(X):
n = sp.linalg.norm(X[ee, :])
else:
n = np.linalg.norm(X[ee, :])
if np.abs(n - 1.) > 1e-4:
raise ValueError("Data l2-norm must be 1, found {}".format(n))
return X
def _check_test_data(self, X):
X = check_array(X, accept_sparse="csr", dtype=FLOAT_DTYPES, warn_on_dtype=True)
n_samples, n_features = X.shape
expected_n_features = self.cluster_centers_.shape[1]
if not n_features == expected_n_features:
raise ValueError(
"Incorrect number of features. "
"Got %d features, expected %d" % (n_features, expected_n_features)
)
for ee in range(n_samples):
if sp.issparse(X):
n = sp.linalg.norm(X[ee, :])
else:
n = np.linalg.norm(X[ee, :])
if np.abs(n - 1.) > 1e-4:
raise ValueError("Data l2-norm must be 1, found {}".format(n))
return X
def fit(self, X, y=None):
    """Compute mixture of von Mises Fisher clustering.

    Parameters
    ----------
    X : array-like or sparse matrix, shape=(n_samples, n_features)
    y : ignored, present for sklearn API compatibility.

    Returns
    -------
    self
    """
    # Optionally project rows onto the unit sphere before validation.
    if self.normalize:
        X = normalize(X)
    self._check_force_weights()
    random_state = check_random_state(self.random_state)
    X = self._check_fit_data(X)
    (
        self.cluster_centers_,
        self.labels_,
        self.inertia_,
        self.weights_,
        self.concentrations_,
        self.posterior_,
    ) = movMF(
        X,
        self.n_clusters,
        posterior_type=self.posterior_type,
        force_weights=self.force_weights,
        n_init=self.n_init,
        n_jobs=self.n_jobs,
        max_iter=self.max_iter,
        verbose=self.verbose,
        init=self.init,
        random_state=random_state,
        tol=self.tol,
        copy_x=self.copy_x,
    )
    return self
def fit_predict(self, X, y=None):
    """Compute cluster centers and predict cluster index for each sample.

    Convenience method; equivalent to calling fit(X) followed by
    predict(X).
    """
    return self.fit(X).labels_
def fit_transform(self, X, y=None):
    """Compute clustering and transform X to cluster-distance space.

    Equivalent to fit(X).transform(X), but more efficiently implemented.
    """
    # Currently, this just skips a copy of the data if it is not in
    # np.array or CSR format already.
    # XXX This skips _check_test_data, which may change the dtype;
    # we should refactor the input validation.
    return self.fit(X)._transform(X)
def transform(self, X, y=None):
    """Transform X to a cluster-distance space.

    In the new space, each dimension is the cosine distance to the cluster
    centers.  Note that even if X is sparse, the array returned by
    `transform` will typically be dense.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape = [n_samples, n_features]
        New data to transform.

    Returns
    -------
    X_new : array, shape [n_samples, k]
        X transformed in the new space.
    """
    if self.normalize:
        X = normalize(X)
    check_is_fitted(self, "cluster_centers_")
    X = self._check_test_data(X)
    return self._transform(X)
def _transform(self, X):
    """Guts of transform method; no input validation.

    Returns the (n_samples, n_clusters) cosine-distance matrix.
    """
    return cosine_distances(X, self.cluster_centers_)
def predict(self, X):
    """Predict the closest cluster each sample in X belongs to.

    In the vector quantization literature, `cluster_centers_` is called
    the code book and each value returned by `predict` is the index of
    the closest code in the code book.

    Note: unless normalize=True, does not check that each point is on
    the sphere.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape = [n_samples, n_features]
        New data to predict.

    Returns
    -------
    labels : array, shape [n_samples,]
        Index of the cluster each sample belongs to.
    """
    if self.normalize:
        X = normalize(X)
    check_is_fitted(self, "cluster_centers_")
    X = self._check_test_data(X)
    # _labels_inertia returns (labels, inertia); keep labels only.
    return _labels_inertia(X, self.cluster_centers_)[0]
def score(self, X, y=None):
    """Inertia score (negated sum of all distances to closest cluster).

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape = [n_samples, n_features]
        New data.

    Returns
    -------
    score : float
        Larger score is better (0 is a perfect fit).
    """
    if self.normalize:
        X = normalize(X)
    check_is_fitted(self, "cluster_centers_")
    X = self._check_test_data(X)
    # Negate so that "greater is better", per the sklearn convention.
    return -_labels_inertia(X, self.cluster_centers_)[1]
def log_likelihood(self, X):
    """Posterior responsibilities of each fitted cluster for X.

    Despite the name, _log_likelihood returns the (n_clusters,
    n_examples) soft posterior matrix for the fitted mixture.
    """
    check_is_fitted(self, "cluster_centers_")
    return _log_likelihood(
        X, self.cluster_centers_, self.weights_, self.concentrations_
    )
|
jasonlaska/spherecluster | spherecluster/von_mises_fisher_mixture.py | _S | python | def _S(kappa, alpha, beta):
kappa = 1. * np.abs(kappa)
alpha = 1. * alpha
beta = 1. * np.abs(beta)
a_plus_b = alpha + beta
u = np.sqrt(kappa ** 2 + beta ** 2)
if alpha == 0:
alpha_scale = 0
else:
alpha_scale = alpha * np.log((alpha + u) / a_plus_b)
return u - beta - alpha_scale | Compute the antiderivative of the Amos-type bound G on the modified
Bessel function ratio.
Note: Handles scalar kappa, alpha, and beta only.
See "S <-" in movMF.R and utility function implementation notes from
https://cran.r-project.org/web/packages/movMF/index.html | train | https://github.com/jasonlaska/spherecluster/blob/701b0b1909088a56e353b363b2672580d4fe9d93/spherecluster/von_mises_fisher_mixture.py#L105-L124 | null | import warnings
import numpy as np
import scipy.sparse as sp
from scipy.special import iv # modified Bessel function of first kind, I_v
from numpy import i0 # modified Bessel function of first kind order 0, I_0
from scipy.special import logsumexp
from sklearn.base import BaseEstimator, ClusterMixin, TransformerMixin
from sklearn.cluster.k_means_ import _init_centroids, _tolerance, _validate_center_shape
from sklearn.utils.validation import FLOAT_DTYPES
from sklearn.utils.validation import check_is_fitted
from sklearn.utils import check_array, check_random_state, as_float_array
from sklearn.preprocessing import normalize
from sklearn.utils.extmath import squared_norm
from sklearn.metrics.pairwise import cosine_distances
from sklearn.externals.joblib import Parallel, delayed
from . import spherical_kmeans
MAX_CONTENTRATION = 1e10
def _inertia_from_labels(X, centers, labels):
"""Compute inertia with cosine distance using known labels.
"""
n_examples, n_features = X.shape
inertia = np.zeros((n_examples,))
for ee in range(n_examples):
inertia[ee] = 1 - X[ee, :].dot(centers[int(labels[ee]), :].T)
return np.sum(inertia)
def _labels_inertia(X, centers):
"""Compute labels and inertia with cosine distance.
"""
n_examples, n_features = X.shape
n_clusters, n_features = centers.shape
labels = np.zeros((n_examples,))
inertia = np.zeros((n_examples,))
for ee in range(n_examples):
dists = np.zeros((n_clusters,))
for cc in range(n_clusters):
dists[cc] = 1 - X[ee, :].dot(centers[cc, :].T)
labels[ee] = np.argmin(dists)
inertia[ee] = dists[int(labels[ee])]
return labels, np.sum(inertia)
def _vmf_log(X, kappa, mu):
    """Compute log(vMF(X, kappa, mu)) using built-in numpy/scipy Bessel
    approximations.

    Works well on small kappa and mu.

    BUGFIX: the previous implementation evaluated
    log(C * exp(kappa * X.dot(mu))), whose exp() overflows to inf for
    moderately large kappa * <x, mu> even though the log-density itself
    is finite.  Computing log(C) + kappa * X.dot(mu) is mathematically
    identical and stays in the log domain, so no overflow occurs.
    _vmf_normalize still raises ValueError when the normalization
    constant itself over/underflows, as before.
    """
    n_examples, n_features = X.shape
    return np.log(_vmf_normalize(kappa, n_features)) + kappa * X.dot(mu).T
def _vmf_normalize(kappa, dim):
"""Compute normalization constant using built-in numpy/scipy Bessel
approximations.
Works well on small kappa and mu.
"""
num = np.power(kappa, dim / 2. - 1.)
if dim / 2. - 1. < 1e-15:
denom = np.power(2. * np.pi, dim / 2.) * i0(kappa)
else:
denom = np.power(2. * np.pi, dim / 2.) * iv(dim / 2. - 1., kappa)
if np.isinf(num):
raise ValueError("VMF scaling numerator was inf.")
if np.isinf(denom):
raise ValueError("VMF scaling denominator was inf.")
if np.abs(denom) < 1e-15:
raise ValueError("VMF scaling denominator was 0.")
return num / denom
def _log_H_asymptotic(nu, kappa):
    """Amos-type upper-bound asymptotic approximation of log(H_nu)(kappa),
    where log(H_nu)(kappa) = int_0^kappa R_nu(t) dt and R_nu is the
    modified Bessel function ratio.

    Port of "lH_asymptotic" from the movMF R package; see the utility
    function notes at
    https://cran.r-project.org/web/packages/movMF/index.html
    """
    beta = np.sqrt((nu + 0.5) ** 2)  # == |nu + 0.5|
    cutoff = np.sqrt((3. * nu + 11. / 2.) * (nu + 3. / 2.))
    kappa_low = np.min([kappa, cutoff])
    bound = _S(kappa, nu + 0.5, beta)
    correction = _S(kappa_low, nu, nu + 2.) - _S(kappa_low, nu + 0.5, beta)
    return bound + correction
def _vmf_log_asymptotic(X, kappa, mu):
    """Approximate log(f(x|theta)) via the Amos bound:

        log f(x|theta) = theta' x - log(H_{d/2-1})(||theta||)

    with theta = kappa * mu and ||theta|| = kappa.  Replacing the exact
    Bessel normalizer of _vmf_log with _log_H_asymptotic avoids loss of
    precision for large kappa and n_features.

    Port of the movMF.R utilities from
    https://cran.r-project.org/web/packages/movMF/index.html
    """
    n_features = X.shape[1]
    log_norm = _log_H_asymptotic(n_features / 2. - 1., kappa)
    return kappa * X.dot(mu).T - log_norm
def _log_likelihood(X, centers, weights, concentrations):
    """Soft posterior responsibilities of each cluster for each example.

    Despite the name, this returns the (n_clusters, n_examples)
    posterior matrix exp(log w_c + log f(x|c) - logsumexp_c(...)).
    A 1-D X is treated as a single example.
    """
    if len(np.shape(X)) != 2:
        X = X.reshape((1, len(X)))
    n_examples, n_features = np.shape(X)
    n_clusters = centers.shape[0]
    # The exact Bessel-based density is numerically safe only in low
    # dimension; beyond ~50 features fall back to the Amos-bound form.
    if n_features <= 50:
        density = _vmf_log
    else:
        density = _vmf_log_asymptotic
    f_log = np.zeros((n_clusters, n_examples))
    for c_idx in range(n_clusters):
        f_log[c_idx, :] = density(X, concentrations[c_idx], centers[c_idx, :])
    # Add per-cluster log-weights, then normalize per example via
    # logsumexp for numerical stability.
    weighted = f_log + np.tile(np.log(weights).T, (n_examples, 1)).T
    for e_idx in range(n_examples):
        weighted[:, e_idx] = np.exp(
            weighted[:, e_idx] - logsumexp(weighted[:, e_idx])
        )
    return weighted
def _init_unit_centers(X, n_clusters, random_state, init):
"""Initializes unit norm centers.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
n_clusters : int, optional, default: 8
The number of clusters to form as well as the number of
centroids to generate.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
init: (string) one of
k-means++ : uses sklearn k-means++ initialization algorithm
spherical-k-means : use centroids from one pass of spherical k-means
random : random unit norm vectors
random-orthonormal : random orthonormal vectors
If an ndarray is passed, it should be of shape (n_clusters, n_features)
and gives the initial centers.
"""
n_examples, n_features = np.shape(X)
if isinstance(init, np.ndarray):
n_init_clusters, n_init_features = init.shape
assert n_init_clusters == n_clusters
assert n_init_features == n_features
# ensure unit normed centers
centers = init
for cc in range(n_clusters):
centers[cc, :] = centers[cc, :] / np.linalg.norm(centers[cc, :])
return centers
elif init == "spherical-k-means":
labels, inertia, centers, iters = spherical_kmeans._spherical_kmeans_single_lloyd(
X, n_clusters, x_squared_norms=np.ones((n_examples,)), init="k-means++"
)
return centers
elif init == "random":
centers = np.random.randn(n_clusters, n_features)
for cc in range(n_clusters):
centers[cc, :] = centers[cc, :] / np.linalg.norm(centers[cc, :])
return centers
elif init == "k-means++":
centers = _init_centroids(
X,
n_clusters,
"k-means++",
random_state=random_state,
x_squared_norms=np.ones((n_examples,)),
)
for cc in range(n_clusters):
centers[cc, :] = centers[cc, :] / np.linalg.norm(centers[cc, :])
return centers
elif init == "random-orthonormal":
centers = np.random.randn(n_clusters, n_features)
q, r = np.linalg.qr(centers.T, mode="reduced")
return q.T
elif init == "random-class":
centers = np.zeros((n_clusters, n_features))
for cc in range(n_clusters):
while np.linalg.norm(centers[cc, :]) == 0:
labels = np.random.randint(0, n_clusters, n_examples)
centers[cc, :] = X[labels == cc, :].sum(axis=0)
for cc in range(n_clusters):
centers[cc, :] = centers[cc, :] / np.linalg.norm(centers[cc, :])
return centers
def _expectation(X, centers, weights, concentrations, posterior_type="soft"):
    """E-step: posterior of each cluster for each datapoint.

    Parameters
    ----------
    X : array, [n_examples x n_features], unit-norm rows
    centers (mu) : array, [n_centers x n_features]
    weights (alpha) : array, [n_centers, ]
    concentrations (kappa) : array, [n_centers, ]
    posterior_type : "soft" (responsibilities) or "hard" (one-hot argmax)

    Returns
    ----------
    posterior : array, [n_centers, n_examples]
    """
    n_examples, n_features = np.shape(X)
    n_clusters, _ = centers.shape
    # Exact Bessel-based log-density is only numerically stable up to
    # about 50 features; above that use the asymptotic approximation.
    if n_features <= 50:
        vmf_f = _vmf_log
    else:
        vmf_f = _vmf_log_asymptotic
    f_log = np.zeros((n_clusters, n_examples))
    for cc in range(n_clusters):
        f_log[cc, :] = vmf_f(X, concentrations[cc], centers[cc, :])
    posterior = np.zeros((n_clusters, n_examples))
    if posterior_type == "soft":
        # Per-example softmax over clusters of (log weight + log density),
        # normalized via logsumexp for stability.
        weights_log = np.log(weights)
        posterior = np.tile(weights_log.T, (n_examples, 1)).T + f_log
        for ee in range(n_examples):
            posterior[:, ee] = np.exp(posterior[:, ee] - logsumexp(posterior[:, ee]))
    elif posterior_type == "hard":
        # One-hot assignment to the maximum-a-posteriori cluster.
        weights_log = np.log(weights)
        weighted_f_log = np.tile(weights_log.T, (n_examples, 1)).T + f_log
        for ee in range(n_examples):
            posterior[np.argmax(weighted_f_log[:, ee]), ee] = 1.0
    # NOTE(review): any other posterior_type silently yields an all-zero
    # posterior — confirm whether an explicit error is preferable.
    return posterior
def _maximization(X, posterior, force_weights=None):
    """M-step: estimate new centers, weights, and concentrations.

    Parameters
    ----------
    X : array or CSR matrix, [n_examples x n_features], unit-norm rows
    posterior : array, [n_centers, n_examples]
        The posterior matrix from the expectation step.

    force_weights : None or array, [n_centers, ]
        If None is passed, will estimate weights.
        If an array is passed, will use instead of estimating.

    Returns
    ----------
    centers (mu) : array, [n_centers x n_features]
    weights (alpha) : array, [n_centers, ] (alpha)
    concentrations (kappa) : array, [n_centers, ]
    """
    n_examples, n_features = X.shape
    n_clusters, n_examples = posterior.shape
    concentrations = np.zeros((n_clusters,))
    centers = np.zeros((n_clusters, n_features))
    if force_weights is None:
        weights = np.zeros((n_clusters,))

    for cc in range(n_clusters):
        # update weights (alpha)
        if force_weights is None:
            weights[cc] = np.mean(posterior[cc, :])
        else:
            weights = force_weights

        # update centers (mu): posterior-weighted sum of the examples.
        X_scaled = X.copy()
        if sp.issparse(X):
            # Scale each CSR row in place by its posterior weight.
            X_scaled.data *= posterior[cc, :].repeat(np.diff(X_scaled.indptr))
        else:
            for ee in range(n_examples):
                X_scaled[ee, :] *= posterior[cc, ee]
        centers[cc, :] = X_scaled.sum(axis=0)

        # normalize centers
        center_norm = np.linalg.norm(centers[cc, :])
        if center_norm > 1e-8:
            centers[cc, :] = centers[cc, :] / center_norm

        # update concentration (kappa) via the Banerjee et al. 2005
        # approximation: kappa ~= (rbar*d - rbar^3) / (1 - rbar^2).
        # [TODO: add other kappa approximations]
        rbar = center_norm / (n_examples * weights[cc])
        concentrations[cc] = rbar * n_features - np.power(rbar, 3.)
        if np.abs(rbar - 1.0) < 1e-10:
            # rbar -> 1 makes the denominator vanish; clamp kappa.
            concentrations[cc] = MAX_CONTENTRATION
        else:
            concentrations[cc] /= 1. - np.power(rbar, 2.)

        # let python know we can free this (good for large dense X)
        del X_scaled

    return centers, weights, concentrations
def _movMF(
    X,
    n_clusters,
    posterior_type="soft",
    force_weights=None,
    max_iter=300,
    verbose=False,
    init="random-class",
    random_state=None,
    tol=1e-6,
):
    """Single EM run of mixture of von Mises Fisher clustering.

    Implements the algorithms (i) and (ii) from

    "Clustering on the Unit Hypersphere using von Mises-Fisher Distributions"
    by Banerjee, Dhillon, Ghosh, and Sra.

    TODO: Currently only supports Banerjee et al 2005 approximation of kappa,
          however, there are numerous other approximations see _update_params.

    Attribution
    ----------
    Approximation of log-vmf distribution function from movMF R-package.

    movMF: An R Package for Fitting Mixtures of von Mises-Fisher Distributions
    by Kurt Hornik, Bettina Grun, 2014

    Find more at:
      https://cran.r-project.org/web/packages/movMF/vignettes/movMF.pdf
      https://cran.r-project.org/web/packages/movMF/index.html

    Parameters
    ----------
    n_clusters : int, optional, default: 8
        The number of clusters to form as well as the number of
        centroids to generate.

    posterior_type: 'soft' or 'hard'
        Type of posterior computed in exepectation step.
        See note about attribute: self.posterior_

    force_weights : None or array [n_clusters, ]
        If None, the algorithm will estimate the weights.
        If an array of weights, algorithm will estimate concentrations and
        centers with given weights.

    max_iter : int, default: 300
        Maximum number of iterations of the k-means algorithm for a
        single run.

    init: (string) one of
        random-class [default]: random class assignment & centroid computation
        k-means++ : uses sklearn k-means++ initialization algorithm
        spherical-k-means : use centroids from one pass of spherical k-means
        random : random unit norm vectors
        random-orthonormal : random orthonormal vectors
        If an ndarray is passed, it should be of shape (n_clusters, n_features)
        and gives the initial centers.

    tol : float, default: 1e-6
        Relative tolerance with regards to inertia to declare convergence

    random_state : integer or numpy.RandomState, optional
        The generator used to initialize the centers. If an integer is
        given, it fixes the seed. Defaults to the global numpy random
        number generator.

    verbose : int, default 0
        Verbosity mode.

    Returns
    -------
    (centers, weights, concentrations, posterior, labels, inertia)
    """
    random_state = check_random_state(random_state)
    n_examples, n_features = np.shape(X)

    # init centers (mus)
    centers = _init_unit_centers(X, n_clusters, random_state, init)

    # init weights (alphas): uniform unless forced.
    if force_weights is None:
        weights = np.ones((n_clusters,))
        weights = weights / np.sum(weights)
    else:
        weights = force_weights

    # init concentrations (kappas)
    concentrations = np.ones((n_clusters,))

    if verbose:
        print("Initialization complete")

    for iter in range(max_iter):
        centers_prev = centers.copy()

        # expectation step
        posterior = _expectation(
            X, centers, weights, concentrations, posterior_type=posterior_type
        )

        # maximization step
        centers, weights, concentrations = _maximization(
            X, posterior, force_weights=force_weights
        )

        # check convergence: squared Frobenius norm of the center shift.
        tolcheck = squared_norm(centers_prev - centers)
        if tolcheck <= tol:
            if verbose:
                print(
                    "Converged at iteration %d: "
                    "center shift %e within tolerance %e" % (iter, tolcheck, tol)
                )
            break

    # labels come for free via posterior
    labels = np.zeros((n_examples,))
    for ee in range(n_examples):
        labels[ee] = np.argmax(posterior[:, ee])

    inertia = _inertia_from_labels(X, centers, labels)

    return centers, weights, concentrations, posterior, labels, inertia
def movMF(
    X,
    n_clusters,
    posterior_type="soft",
    force_weights=None,
    n_init=10,
    n_jobs=1,
    max_iter=300,
    verbose=False,
    init="random-class",
    random_state=None,
    tol=1e-6,
    copy_x=True,
):
    """Wrapper for parallelization of _movMF and running n_init times.

    Runs _movMF n_init times (serially when n_jobs == 1, otherwise via
    joblib) and returns the run with the lowest inertia as
    (centers, labels, inertia, weights, concentrations, posterior).
    """
    if n_init <= 0:
        raise ValueError(
            "Invalid number of initializations."
            " n_init=%d must be bigger than zero." % n_init
        )
    random_state = check_random_state(random_state)

    if max_iter <= 0:
        raise ValueError(
            "Number of iterations should be a positive number,"
            " got %d instead" % max_iter
        )

    best_inertia = np.infty
    X = as_float_array(X, copy=copy_x)
    # _tolerance scales tol by the data variance (sklearn convention).
    tol = _tolerance(X, tol)

    if hasattr(init, "__array__"):
        # Explicit initial centers: validate and run only once.
        init = check_array(init, dtype=X.dtype.type, copy=True)
        _validate_center_shape(X, n_clusters, init)

        if n_init != 1:
            warnings.warn(
                "Explicit initial center position passed: "
                "performing only one init in k-means instead of n_init=%d" % n_init,
                RuntimeWarning,
                stacklevel=2,
            )
            n_init = 1

    # defaults
    best_centers = None
    best_labels = None
    best_weights = None
    best_concentrations = None
    best_posterior = None
    best_inertia = None

    if n_jobs == 1:
        # For a single thread, less memory is needed if we just store one set
        # of the best results (as opposed to one set per run per thread).
        for it in range(n_init):
            # cluster on the sphere
            (centers, weights, concentrations, posterior, labels, inertia) = _movMF(
                X,
                n_clusters,
                posterior_type=posterior_type,
                force_weights=force_weights,
                max_iter=max_iter,
                verbose=verbose,
                init=init,
                random_state=random_state,
                tol=tol,
            )

            # determine if these results are the best so far
            if best_inertia is None or inertia < best_inertia:
                best_centers = centers.copy()
                best_labels = labels.copy()
                best_weights = weights.copy()
                best_concentrations = concentrations.copy()
                best_posterior = posterior.copy()
                best_inertia = inertia
    else:
        # parallelisation of movMF runs
        # NOTE(review): `seed` is generated per run but never passed to
        # _movMF — every worker receives the same shared random_state, so
        # parallel runs may not be independent.  Confirm whether
        # random_state=seed was intended.
        seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init)
        results = Parallel(n_jobs=n_jobs, verbose=0)(
            delayed(_movMF)(
                X,
                n_clusters,
                posterior_type=posterior_type,
                force_weights=force_weights,
                max_iter=max_iter,
                verbose=verbose,
                init=init,
                random_state=random_state,
                tol=tol,
            )
            for seed in seeds
        )

        # Get results with the lowest inertia
        centers, weights, concentrations, posteriors, labels, inertia = zip(*results)
        best = np.argmin(inertia)
        best_labels = labels[best]
        best_inertia = inertia[best]
        best_centers = centers[best]
        best_concentrations = concentrations[best]
        best_posterior = posteriors[best]
        best_weights = weights[best]

    return (
        best_centers,
        best_labels,
        best_inertia,
        best_weights,
        best_concentrations,
        best_posterior,
    )
class VonMisesFisherMixture(BaseEstimator, ClusterMixin, TransformerMixin):
    """Estimator for Mixture of von Mises Fisher clustering on the unit sphere.

    Implements the algorithms (i) and (ii) from

    "Clustering on the Unit Hypersphere using von Mises-Fisher Distributions"
    by Banerjee, Dhillon, Ghosh, and Sra.

    TODO: Currently only supports Banerjee et al 2005 approximation of kappa,
          however, there are numerous other approximations see _update_params.

    Attribution
    ----------
    Approximation of log-vmf distribution function from movMF R-package.

    movMF: An R Package for Fitting Mixtures of von Mises-Fisher Distributions
    by Kurt Hornik, Bettina Grun, 2014

    Find more at:
      https://cran.r-project.org/web/packages/movMF/vignettes/movMF.pdf
      https://cran.r-project.org/web/packages/movMF/index.html

    Basic sklearn scaffolding from sklearn.cluster.KMeans.

    Parameters
    ----------
    n_clusters : int, optional, default: 8
        The number of clusters to form as well as the number of
        centroids to generate.

    posterior_type: 'soft' or 'hard'
        Type of posterior computed in expectation step.
        See note about attribute: self.posterior_

    force_weights : None or array [n_clusters, ]
        If None, the algorithm will estimate the weights.
        If an array of weights, algorithm will estimate concentrations and
        centers with given weights.

    max_iter : int, default: 300
        Maximum number of iterations of the k-means algorithm for a
        single run.

    n_init : int, default: 10
        Number of time the k-means algorithm will be run with different
        centroid seeds. The final results will be the best output of
        n_init consecutive runs in terms of inertia.

    init: (string) one of
        random-class [default]: random class assignment & centroid computation
        k-means++ : uses sklearn k-means++ initialization algorithm
        spherical-k-means : use centroids from one pass of spherical k-means
        random : random unit norm vectors
        random-orthonormal : random orthonormal vectors
        If an ndarray is passed, it should be of shape (n_clusters, n_features)
        and gives the initial centers.

    tol : float, default: 1e-6
        Relative tolerance with regards to inertia to declare convergence

    n_jobs : int
        The number of jobs to use for the computation. This works by computing
        each of the n_init runs in parallel.
        If -1 all CPUs are used. If 1 is given, no parallel computing code is
        used at all, which is useful for debugging. For n_jobs below -1,
        (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
        are used.

    random_state : integer or numpy.RandomState, optional
        The generator used to initialize the centers. If an integer is
        given, it fixes the seed. Defaults to the global numpy random
        number generator.

    verbose : int, default 0
        Verbosity mode.

    copy_x : boolean, default True
        When pre-computing distances it is more numerically accurate to center
        the data first. If copy_x is True, then the original data is not
        modified. If False, the original data is modified, and put back before
        the function returns, but small numerical differences may be introduced
        by subtracting and then adding the data mean.

    normalize : boolean, default True
        Normalize the input to have unit norm.

    Attributes
    ----------
    cluster_centers_ : array, [n_clusters, n_features]
        Coordinates of cluster centers

    labels_ :
        Labels of each point

    inertia_ : float
        Sum of distances of samples to their closest cluster center.

    weights_ : array, [n_clusters,]
        Weights of each cluster in vMF distribution (alpha).

    concentrations_ : array [n_clusters,]
        Concentration parameter for each cluster (kappa).
        Larger values correspond to more concentrated clusters.

    posterior_ : array, [n_clusters, n_examples]
        Each column corresponds to the posterior distribution for an example.

        If posterior_type='hard' is used, there will only be one non-zero per
        column, its index corresponding to the example's cluster label.

        If posterior_type='soft' is used, this matrix will be dense and the
        column values correspond to soft clustering weights.
    """

    def __init__(
        self,
        n_clusters=5,
        posterior_type="soft",
        force_weights=None,
        n_init=10,
        n_jobs=1,
        max_iter=300,
        verbose=False,
        init="random-class",
        random_state=None,
        tol=1e-6,
        copy_x=True,
        normalize=True,
    ):
        # Hyperparameters only; fitted attributes are set in fit().
        self.n_clusters = n_clusters
        self.posterior_type = posterior_type
        self.force_weights = force_weights
        self.n_init = n_init
        self.n_jobs = n_jobs
        self.max_iter = max_iter
        self.verbose = verbose
        self.init = init
        self.random_state = random_state
        self.tol = tol
        self.copy_x = copy_x
        self.normalize = normalize

    def _check_force_weights(self):
        """Raise ValueError when force_weights is set but mis-sized."""
        if self.force_weights is None:
            return

        if len(self.force_weights) != self.n_clusters:
            raise ValueError(
                (
                    "len(force_weights)={} but must equal "
                    "n_clusters={}".format(len(self.force_weights), self.n_clusters)
                )
            )

    def _check_fit_data(self, X):
        """Verify that the number of samples given is larger than k
        and that every row has unit l2-norm (within 1e-4)."""
        X = check_array(X, accept_sparse="csr", dtype=[np.float64, np.float32])
        n_samples, n_features = X.shape
        if X.shape[0] < self.n_clusters:
            raise ValueError(
                "n_samples=%d should be >= n_clusters=%d"
                % (X.shape[0], self.n_clusters)
            )

        for ee in range(n_samples):
            if sp.issparse(X):
                n = sp.linalg.norm(X[ee, :])
            else:
                n = np.linalg.norm(X[ee, :])

            if np.abs(n - 1.) > 1e-4:
                raise ValueError("Data l2-norm must be 1, found {}".format(n))

        return X

    def _check_test_data(self, X):
        """Validate transform/predict input: feature count must match the
        fitted centers and rows must have unit l2-norm."""
        # NOTE(review): warn_on_dtype is deprecated in newer scikit-learn
        # releases — confirm the supported sklearn version.
        X = check_array(X, accept_sparse="csr", dtype=FLOAT_DTYPES, warn_on_dtype=True)
        n_samples, n_features = X.shape
        expected_n_features = self.cluster_centers_.shape[1]
        if not n_features == expected_n_features:
            raise ValueError(
                "Incorrect number of features. "
                "Got %d features, expected %d" % (n_features, expected_n_features)
            )

        for ee in range(n_samples):
            if sp.issparse(X):
                n = sp.linalg.norm(X[ee, :])
            else:
                n = np.linalg.norm(X[ee, :])

            if np.abs(n - 1.) > 1e-4:
                raise ValueError("Data l2-norm must be 1, found {}".format(n))

        return X

    def fit(self, X, y=None):
        """Compute mixture of von Mises Fisher clustering.

        Parameters
        ----------
        X : array-like or sparse matrix, shape=(n_samples, n_features)
        y : ignored, present for sklearn API compatibility.
        """
        if self.normalize:
            X = normalize(X)

        self._check_force_weights()
        random_state = check_random_state(self.random_state)
        X = self._check_fit_data(X)

        (
            self.cluster_centers_,
            self.labels_,
            self.inertia_,
            self.weights_,
            self.concentrations_,
            self.posterior_,
        ) = movMF(
            X,
            self.n_clusters,
            posterior_type=self.posterior_type,
            force_weights=self.force_weights,
            n_init=self.n_init,
            n_jobs=self.n_jobs,
            max_iter=self.max_iter,
            verbose=self.verbose,
            init=self.init,
            random_state=random_state,
            tol=self.tol,
            copy_x=self.copy_x,
        )

        return self

    def fit_predict(self, X, y=None):
        """Compute cluster centers and predict cluster index for each sample.

        Convenience method; equivalent to calling fit(X) followed by
        predict(X).
        """
        return self.fit(X).labels_

    def fit_transform(self, X, y=None):
        """Compute clustering and transform X to cluster-distance space.

        Equivalent to fit(X).transform(X), but more efficiently implemented.
        """
        # Currently, this just skips a copy of the data if it is not in
        # np.array or CSR format already.
        # XXX This skips _check_test_data, which may change the dtype;
        # we should refactor the input validation.
        return self.fit(X)._transform(X)

    def transform(self, X, y=None):
        """Transform X to a cluster-distance space.

        In the new space, each dimension is the cosine distance to the cluster
        centers.  Note that even if X is sparse, the array returned by
        `transform` will typically be dense.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            New data to transform.

        Returns
        -------
        X_new : array, shape [n_samples, k]
            X transformed in the new space.
        """
        if self.normalize:
            X = normalize(X)

        check_is_fitted(self, "cluster_centers_")
        X = self._check_test_data(X)
        return self._transform(X)

    def _transform(self, X):
        """guts of transform method; no input validation"""
        return cosine_distances(X, self.cluster_centers_)

    def predict(self, X):
        """Predict the closest cluster each sample in X belongs to.

        In the vector quantization literature, `cluster_centers_` is called
        the code book and each value returned by `predict` is the index of
        the closest code in the code book.

        Note: Does not check that each point is on the sphere.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            New data to predict.

        Returns
        -------
        labels : array, shape [n_samples,]
            Index of the cluster each sample belongs to.
        """
        if self.normalize:
            X = normalize(X)

        check_is_fitted(self, "cluster_centers_")
        X = self._check_test_data(X)
        return _labels_inertia(X, self.cluster_centers_)[0]

    def score(self, X, y=None):
        """Inertia score (sum of all distances to closest cluster).

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            New data.

        Returns
        -------
        score : float
            Larger score is better (negated inertia; 0 is a perfect fit).
        """
        if self.normalize:
            X = normalize(X)

        check_is_fitted(self, "cluster_centers_")
        X = self._check_test_data(X)
        return -_labels_inertia(X, self.cluster_centers_)[1]

    def log_likelihood(self, X):
        """Posterior responsibilities of each fitted cluster for X.

        Despite the name, _log_likelihood returns the (n_clusters,
        n_examples) soft posterior matrix for the fitted mixture.
        """
        check_is_fitted(self, "cluster_centers_")
        return _log_likelihood(
            X, self.cluster_centers_, self.weights_, self.concentrations_
        )
|
jasonlaska/spherecluster | spherecluster/von_mises_fisher_mixture.py | _vmf_log_asymptotic | python | def _vmf_log_asymptotic(X, kappa, mu):
n_examples, n_features = X.shape
log_vfm = kappa * X.dot(mu).T + -_log_H_asymptotic(n_features / 2. - 1., kappa)
return log_vfm | Compute log(f(x|theta)) via Amos approximation
log(f(x|theta)) = theta' x - log(H_{d/2-1})(\|theta\|)
where theta = kappa * X, \|theta\| = kappa.
Computing _vmf_log helps with numerical stability / loss of precision for
for large values of kappa and n_features.
See utility function implementation notes in movMF.R from
https://cran.r-project.org/web/packages/movMF/index.html | train | https://github.com/jasonlaska/spherecluster/blob/701b0b1909088a56e353b363b2672580d4fe9d93/spherecluster/von_mises_fisher_mixture.py#L127-L143 | [
"def _log_H_asymptotic(nu, kappa):\n \"\"\"Compute the Amos-type upper bound asymptotic approximation on H where\n log(H_\\nu)(\\kappa) = \\int_0^\\kappa R_\\nu(t) dt.\n\n See \"lH_asymptotic <-\" in movMF.R and utility function implementation notes\n from https://cran.r-project.org/web/packages/movMF/i... | import warnings
import numpy as np
import scipy.sparse as sp
from scipy.special import iv # modified Bessel function of first kind, I_v
from numpy import i0 # modified Bessel function of first kind order 0, I_0
from scipy.special import logsumexp
from sklearn.base import BaseEstimator, ClusterMixin, TransformerMixin
from sklearn.cluster.k_means_ import _init_centroids, _tolerance, _validate_center_shape
from sklearn.utils.validation import FLOAT_DTYPES
from sklearn.utils.validation import check_is_fitted
from sklearn.utils import check_array, check_random_state, as_float_array
from sklearn.preprocessing import normalize
from sklearn.utils.extmath import squared_norm
from sklearn.metrics.pairwise import cosine_distances
from sklearn.externals.joblib import Parallel, delayed
from . import spherical_kmeans
MAX_CONTENTRATION = 1e10
def _inertia_from_labels(X, centers, labels):
    """Compute inertia (summed cosine distance of each example to its
    assigned center) using known labels.

    labels may be float-typed; entries are cast to int before indexing.
    """
    n_examples, n_features = X.shape
    inertia = np.zeros((n_examples,))
    for ee in range(n_examples):
        inertia[ee] = 1 - X[ee, :].dot(centers[int(labels[ee]), :].T)

    return np.sum(inertia)
def _labels_inertia(X, centers):
    """Compute nearest-center labels and inertia with cosine distance.

    Returns (labels, inertia): labels is a float array of assigned
    cluster indices; inertia is the summed distance to assigned centers.
    """
    n_examples, n_features = X.shape
    n_clusters, n_features = centers.shape
    labels = np.zeros((n_examples,))
    inertia = np.zeros((n_examples,))

    for ee in range(n_examples):
        dists = np.zeros((n_clusters,))
        for cc in range(n_clusters):
            dists[cc] = 1 - X[ee, :].dot(centers[cc, :].T)

        labels[ee] = np.argmin(dists)
        inertia[ee] = dists[int(labels[ee])]

    return labels, np.sum(inertia)
def _vmf_log(X, kappa, mu):
    """Log-density of the von Mises-Fisher distribution at each row of X.

    Uses the exact normalization constant (scipy/numpy Bessel functions),
    so it is only numerically safe for small ``kappa`` and dimensionality.
    """
    _, dim = X.shape
    density = _vmf_normalize(kappa, dim) * np.exp(kappa * X.dot(mu).T)
    return np.log(density)
def _vmf_normalize(kappa, dim):
    """Normalization constant C_dim(kappa) of the vMF density.

    C_dim(kappa) = kappa^(dim/2 - 1) / ((2 pi)^(dim/2) * I_(dim/2-1)(kappa)),
    evaluated with numpy/scipy Bessel functions (accurate for small kappa).

    Raises
    ------
    ValueError
        If the numerator or denominator overflows to inf, or the
        denominator underflows to (numerically) zero.
    """
    order = dim / 2. - 1.
    numerator = np.power(kappa, order)
    # numpy ships a dedicated, cheaper implementation for order 0.
    bessel = i0(kappa) if order < 1e-15 else iv(order, kappa)
    denominator = np.power(2. * np.pi, dim / 2.) * bessel
    if np.isinf(numerator):
        raise ValueError("VMF scaling numerator was inf.")
    if np.isinf(denominator):
        raise ValueError("VMF scaling denominator was inf.")
    if np.abs(denominator) < 1e-15:
        raise ValueError("VMF scaling denominator was 0.")
    return numerator / denominator
def _log_H_asymptotic(nu, kappa):
    """Amos-type upper-bound asymptotic approximation of log(H_nu)(kappa),
    where log(H_nu)(kappa) = integral_0^kappa R_nu(t) dt.

    Mirrors "lH_asymptotic <-" in movMF.R from the movMF R package.
    """
    beta = np.sqrt((nu + 0.5) ** 2)
    # Clip kappa for the correction term, as in the R reference code.
    threshold = np.sqrt((3. * nu + 11. / 2.) * (nu + 3. / 2.))
    kappa_low = np.min([kappa, threshold])
    correction = _S(kappa_low, nu, nu + 2.) - _S(kappa_low, nu + 0.5, beta)
    return _S(kappa, nu + 0.5, beta) + correction
def _S(kappa, alpha, beta):
    """Antiderivative of the Amos-type bound G on the modified Bessel
    function ratio.

    Handles scalar arguments only.  Mirrors "S <-" in movMF.R from the
    movMF R package.
    """
    kappa = np.abs(kappa) * 1.
    alpha = alpha * 1.
    beta = np.abs(beta) * 1.
    root = np.sqrt(kappa ** 2 + beta ** 2)
    if alpha == 0:
        scaled = 0
    else:
        scaled = alpha * np.log((alpha + root) / (alpha + beta))
    return root - beta - scaled
def _log_likelihood(X, centers, weights, concentrations):
    # NOTE(review): despite the name, this returns the posterior
    # responsibility matrix, shape [n_clusters, n_examples] -- the same
    # soft-assignment computation as _expectation(posterior_type="soft").
    # Promote a single example (1-D input) to a 1 x n_features matrix.
    if len(np.shape(X)) != 2:
        X = X.reshape((1, len(X)))
    n_examples, n_features = np.shape(X)
    n_clusters, _ = centers.shape
    # The exact vMF log-density overflows in high dimension; switch to the
    # Amos-type asymptotic approximation beyond ~50 features.
    if n_features <= 50:  # works up to about 50 before numrically unstable
        vmf_f = _vmf_log
    else:
        vmf_f = _vmf_log_asymptotic
    f_log = np.zeros((n_clusters, n_examples))
    for cc in range(n_clusters):
        f_log[cc, :] = vmf_f(X, concentrations[cc], centers[cc, :])
    posterior = np.zeros((n_clusters, n_examples))
    weights_log = np.log(weights)
    # log(alpha_c) + log f_c(x), then normalize per example in log-space.
    posterior = np.tile(weights_log.T, (n_examples, 1)).T + f_log
    for ee in range(n_examples):
        posterior[:, ee] = np.exp(posterior[:, ee] - logsumexp(posterior[:, ee]))
    return posterior
def _init_unit_centers(X, n_clusters, random_state, init):
    """Initializes unit norm centers.

    Parameters
    ----------
    X : array-like or sparse matrix, shape=(n_samples, n_features)

    n_clusters : int
        The number of clusters to form as well as the number of
        centroids to generate.

    random_state : numpy.RandomState
        The generator used to initialize the centers (callers pass the
        result of ``check_random_state``).

    init: (string) one of
        k-means++ : uses sklearn k-means++ initialization algorithm
        spherical-k-means : use centroids from one pass of spherical k-means
        random : random unit norm vectors
        random-orthonormal : random orthonormal vectors
        random-class : random class assignment & centroid computation
        If an ndarray is passed, it should be of shape (n_clusters, n_features)
        and gives the initial centers.

    Raises
    ------
    ValueError
        If ``init`` is a string that names no known strategy.
    """
    n_examples, n_features = np.shape(X)
    if isinstance(init, np.ndarray):
        n_init_clusters, n_init_features = init.shape
        assert n_init_clusters == n_clusters
        assert n_init_features == n_features
        # Copy before normalizing so the caller's array is not mutated
        # (previously the rows of `init` were scaled in place).
        centers = init.copy()
        for cc in range(n_clusters):
            centers[cc, :] = centers[cc, :] / np.linalg.norm(centers[cc, :])
        return centers
    elif init == "spherical-k-means":
        labels, inertia, centers, iters = spherical_kmeans._spherical_kmeans_single_lloyd(
            X, n_clusters, x_squared_norms=np.ones((n_examples,)), init="k-means++"
        )
        return centers
    elif init == "random":
        # Draw from the provided random_state (previously np.random was
        # used, silently ignoring the seed and breaking reproducibility).
        centers = random_state.randn(n_clusters, n_features)
        for cc in range(n_clusters):
            centers[cc, :] = centers[cc, :] / np.linalg.norm(centers[cc, :])
        return centers
    elif init == "k-means++":
        centers = _init_centroids(
            X,
            n_clusters,
            "k-means++",
            random_state=random_state,
            x_squared_norms=np.ones((n_examples,)),
        )
        for cc in range(n_clusters):
            centers[cc, :] = centers[cc, :] / np.linalg.norm(centers[cc, :])
        return centers
    elif init == "random-orthonormal":
        centers = random_state.randn(n_clusters, n_features)
        q, r = np.linalg.qr(centers.T, mode="reduced")
        return q.T
    elif init == "random-class":
        centers = np.zeros((n_clusters, n_features))
        for cc in range(n_clusters):
            # Re-draw labels until cluster cc receives at least one example.
            while np.linalg.norm(centers[cc, :]) == 0:
                labels = random_state.randint(0, n_clusters, n_examples)
                centers[cc, :] = X[labels == cc, :].sum(axis=0)
        for cc in range(n_clusters):
            centers[cc, :] = centers[cc, :] / np.linalg.norm(centers[cc, :])
        return centers
    # Previously an unknown string fell through and returned None, which
    # crashed later with an opaque error; fail fast instead.
    raise ValueError("Invalid init value: {}".format(init))
def _expectation(X, centers, weights, concentrations, posterior_type="soft"):
    """Compute the log-likelihood of each datapoint being in each cluster.

    Parameters
    ----------
    X : array-like or sparse matrix, shape=(n_examples, n_features)
        Unit-norm observations.
    centers (mu) : array, [n_centers x n_features]
    weights (alpha) : array, [n_centers, ] (alpha)
    concentrations (kappa) : array, [n_centers, ]
    posterior_type : 'soft' or 'hard'
        'soft' yields normalized responsibilities; 'hard' a one-hot
        assignment to the a-posteriori most likely cluster.

    Returns
    ----------
    posterior : array, [n_centers, n_examples]
    """
    n_examples, n_features = np.shape(X)
    n_clusters, _ = centers.shape
    # The exact vMF log-density overflows in high dimension; switch to the
    # Amos-type asymptotic approximation beyond ~50 features.
    if n_features <= 50:  # works up to about 50 before numrically unstable
        vmf_f = _vmf_log
    else:
        vmf_f = _vmf_log_asymptotic
    f_log = np.zeros((n_clusters, n_examples))
    for cc in range(n_clusters):
        f_log[cc, :] = vmf_f(X, concentrations[cc], centers[cc, :])
    posterior = np.zeros((n_clusters, n_examples))
    if posterior_type == "soft":
        weights_log = np.log(weights)
        # log(alpha_c) + log f_c(x), normalized per example via logsumexp.
        posterior = np.tile(weights_log.T, (n_examples, 1)).T + f_log
        for ee in range(n_examples):
            posterior[:, ee] = np.exp(posterior[:, ee] - logsumexp(posterior[:, ee]))
    elif posterior_type == "hard":
        weights_log = np.log(weights)
        # One-hot column per example at the argmax of the weighted density.
        weighted_f_log = np.tile(weights_log.T, (n_examples, 1)).T + f_log
        for ee in range(n_examples):
            posterior[np.argmax(weighted_f_log[:, ee]), ee] = 1.0
    return posterior
def _maximization(X, posterior, force_weights=None):
    """Estimate new centers, weights, and concentrations from the posterior.

    Parameters
    ----------
    X : array-like or sparse (CSR) matrix, shape=(n_examples, n_features)
        Unit-norm observations.
    posterior : array, [n_centers, n_examples]
        The posterior matrix from the expectation step.
    force_weights : None or array, [n_centers, ]
        If None is passed, will estimate weights.
        If an array is passed, will use instead of estimating.

    Returns
    ----------
    centers (mu) : array, [n_centers x n_features]
    weights (alpha) : array, [n_centers, ] (alpha)
    concentrations (kappa) : array, [n_centers, ]
    """
    n_examples, n_features = X.shape
    n_clusters, n_examples = posterior.shape
    concentrations = np.zeros((n_clusters,))
    centers = np.zeros((n_clusters, n_features))
    if force_weights is None:
        weights = np.zeros((n_clusters,))
    for cc in range(n_clusters):
        # update weights (alpha)
        if force_weights is None:
            weights[cc] = np.mean(posterior[cc, :])
        else:
            weights = force_weights
        # update centers (mu)
        X_scaled = X.copy()
        if sp.issparse(X):
            # For CSR, repeat expands the per-row posterior weight to a
            # per-nonzero factor so each row i is scaled by posterior[cc, i].
            X_scaled.data *= posterior[cc, :].repeat(np.diff(X_scaled.indptr))
        else:
            for ee in range(n_examples):
                X_scaled[ee, :] *= posterior[cc, ee]
        centers[cc, :] = X_scaled.sum(axis=0)
        # normalize centers
        center_norm = np.linalg.norm(centers[cc, :])
        if center_norm > 1e-8:
            centers[cc, :] = centers[cc, :] / center_norm
        # update concentration (kappa) [TODO: add other kappa approximations]
        # Banerjee et al. (2005): kappa ~= (rbar*d - rbar^3) / (1 - rbar^2),
        # where rbar is the mean resultant length of cluster cc.
        rbar = center_norm / (n_examples * weights[cc])
        concentrations[cc] = rbar * n_features - np.power(rbar, 3.)
        if np.abs(rbar - 1.0) < 1e-10:
            # The approximation diverges as rbar -> 1; cap kappa instead.
            concentrations[cc] = MAX_CONTENTRATION
        else:
            concentrations[cc] /= 1. - np.power(rbar, 2.)
        # let python know we can free this (good for large dense X)
        del X_scaled
    return centers, weights, concentrations
def _movMF(
    X,
    n_clusters,
    posterior_type="soft",
    force_weights=None,
    max_iter=300,
    verbose=False,
    init="random-class",
    random_state=None,
    tol=1e-6,
):
    """Mixture of von Mises Fisher clustering (a single EM run).

    Implements the algorithms (i) and (ii) from
    "Clustering on the Unit Hypersphere using von Mises-Fisher Distributions"
    by Banerjee, Dhillon, Ghosh, and Sra.

    TODO: Currently only supports Banerjee et al 2005 approximation of kappa,
    however, there are numerous other approximations see _update_params.

    Attribution
    ----------
    Approximation of log-vmf distribution function from movMF R-package.
    movMF: An R Package for Fitting Mixtures of von Mises-Fisher Distributions
    by Kurt Hornik, Bettina Grun, 2014
    Find more at:
        https://cran.r-project.org/web/packages/movMF/vignettes/movMF.pdf
        https://cran.r-project.org/web/packages/movMF/index.html

    Parameters
    ----------
    X : array-like or sparse matrix, shape=(n_examples, n_features)
        Unit-norm observations.
    n_clusters : int
        The number of clusters to form as well as the number of
        centroids to generate.
    posterior_type: 'soft' or 'hard'
        Type of posterior computed in the expectation step.
    force_weights : None or array [n_clusters, ]
        If None, the algorithm will estimate the weights.
        If an array of weights, algorithm will estimate concentrations and
        centers with given weights.
    max_iter : int, default: 300
        Maximum number of EM iterations for this run.
    verbose : bool, default: False
        Verbosity mode.
    init: (string) one of
        random-class [default]: random class assignment & centroid computation
        k-means++ : uses sklearn k-means++ initialization algorithm
        spherical-k-means : use centroids from one pass of spherical k-means
        random : random unit norm vectors
        random-orthonormal : random orthonormal vectors
        If an ndarray is passed, it should be of shape (n_clusters, n_features)
        and gives the initial centers.
    random_state : integer or numpy.RandomState, optional
        The generator used to initialize the centers. If an integer is
        given, it fixes the seed.
    tol : float, default: 1e-6
        Tolerance on the squared center shift used to declare convergence.

    Returns
    -------
    (centers, weights, concentrations, posterior, labels, inertia)
    """
    random_state = check_random_state(random_state)
    n_examples, n_features = np.shape(X)
    # init centers (mus)
    centers = _init_unit_centers(X, n_clusters, random_state, init)
    # init weights (alphas) to uniform
    if force_weights is None:
        weights = np.ones((n_clusters,))
        weights = weights / np.sum(weights)
    else:
        weights = force_weights
    # init concentrations (kappas)
    concentrations = np.ones((n_clusters,))
    if verbose:
        print("Initialization complete")
    # EM loop; `iteration` (not `iter`) so the builtin is not shadowed.
    for iteration in range(max_iter):
        centers_prev = centers.copy()
        # expectation step
        posterior = _expectation(
            X, centers, weights, concentrations, posterior_type=posterior_type
        )
        # maximization step
        centers, weights, concentrations = _maximization(
            X, posterior, force_weights=force_weights
        )
        # check convergence via squared shift of the centers
        tolcheck = squared_norm(centers_prev - centers)
        if tolcheck <= tol:
            if verbose:
                print(
                    "Converged at iteration %d: "
                    "center shift %e within tolerance %e" % (iteration, tolcheck, tol)
                )
            break
    # labels come for free via posterior
    labels = np.zeros((n_examples,))
    for ee in range(n_examples):
        labels[ee] = np.argmax(posterior[:, ee])
    inertia = _inertia_from_labels(X, centers, labels)
    return centers, weights, concentrations, posterior, labels, inertia
def movMF(
    X,
    n_clusters,
    posterior_type="soft",
    force_weights=None,
    n_init=10,
    n_jobs=1,
    max_iter=300,
    verbose=False,
    init="random-class",
    random_state=None,
    tol=1e-6,
    copy_x=True,
):
    """Wrapper for parallelization of _movMF and running n_init times.

    Runs ``_movMF`` ``n_init`` times -- serially when ``n_jobs == 1``,
    otherwise via joblib with one freshly drawn seed per run -- and keeps
    the run with the lowest inertia.

    Returns
    -------
    (centers, labels, inertia, weights, concentrations, posterior)
        Results of the best run.

    Raises
    ------
    ValueError
        If ``n_init`` or ``max_iter`` is not a positive integer.
    """
    if n_init <= 0:
        raise ValueError(
            "Invalid number of initializations."
            " n_init=%d must be bigger than zero." % n_init
        )
    random_state = check_random_state(random_state)
    if max_iter <= 0:
        raise ValueError(
            "Number of iterations should be a positive number,"
            " got %d instead" % max_iter
        )
    X = as_float_array(X, copy=copy_x)
    tol = _tolerance(X, tol)
    if hasattr(init, "__array__"):
        init = check_array(init, dtype=X.dtype.type, copy=True)
        _validate_center_shape(X, n_clusters, init)
        if n_init != 1:
            warnings.warn(
                "Explicit initial center position passed: "
                "performing only one init in k-means instead of n_init=%d" % n_init,
                RuntimeWarning,
                stacklevel=2,
            )
            n_init = 1
    # defaults
    best_centers = None
    best_labels = None
    best_weights = None
    best_concentrations = None
    best_posterior = None
    best_inertia = None
    if n_jobs == 1:
        # For a single thread, less memory is needed if we just store one set
        # of the best results (as opposed to one set per run per thread).
        for it in range(n_init):
            # cluster on the sphere
            (centers, weights, concentrations, posterior, labels, inertia) = _movMF(
                X,
                n_clusters,
                posterior_type=posterior_type,
                force_weights=force_weights,
                max_iter=max_iter,
                verbose=verbose,
                init=init,
                random_state=random_state,
                tol=tol,
            )
            # determine if these results are the best so far
            if best_inertia is None or inertia < best_inertia:
                best_centers = centers.copy()
                best_labels = labels.copy()
                best_weights = weights.copy()
                best_concentrations = concentrations.copy()
                best_posterior = posterior.copy()
                best_inertia = inertia
    else:
        # parallelisation of movMF runs
        seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init)
        results = Parallel(n_jobs=n_jobs, verbose=0)(
            delayed(_movMF)(
                X,
                n_clusters,
                posterior_type=posterior_type,
                force_weights=force_weights,
                max_iter=max_iter,
                verbose=verbose,
                init=init,
                # FIX: each run gets its own seed. Previously the same
                # RandomState object was pickled to every worker, so all
                # n_init parallel runs were identical and `seeds` unused.
                random_state=seed,
                tol=tol,
            )
            for seed in seeds
        )
        # Get results with the lowest inertia
        centers, weights, concentrations, posteriors, labels, inertia = zip(*results)
        best = np.argmin(inertia)
        best_labels = labels[best]
        best_inertia = inertia[best]
        best_centers = centers[best]
        best_concentrations = concentrations[best]
        best_posterior = posteriors[best]
        best_weights = weights[best]
    return (
        best_centers,
        best_labels,
        best_inertia,
        best_weights,
        best_concentrations,
        best_posterior,
    )
class VonMisesFisherMixture(BaseEstimator, ClusterMixin, TransformerMixin):
    """Estimator for Mixture of von Mises Fisher clustering on the unit sphere.
    Implements the algorithms (i) and (ii) from
    "Clustering on the Unit Hypersphere using von Mises-Fisher Distributions"
    by Banerjee, Dhillon, Ghosh, and Sra.
    TODO: Currently only supports Banerjee et al 2005 approximation of kappa,
    however, there are numerous other approximations see _update_params.
    Attribution
    ----------
    Approximation of log-vmf distribution function from movMF R-package.
    movMF: An R Package for Fitting Mixtures of von Mises-Fisher Distributions
    by Kurt Hornik, Bettina Grun, 2014
    Find more at:
        https://cran.r-project.org/web/packages/movMF/vignettes/movMF.pdf
        https://cran.r-project.org/web/packages/movMF/index.html
    Basic sklearn scaffolding from sklearn.cluster.KMeans.
    Parameters
    ----------
    n_clusters : int, optional, default: 5
        The number of clusters to form as well as the number of
        centroids to generate.
    posterior_type: 'soft' or 'hard'
        Type of posterior computed in exepectation step.
        See note about attribute: self.posterior_
    force_weights : None or array [n_clusters, ]
        If None, the algorithm will estimate the weights.
        If an array of weights, algorithm will estimate concentrations and
        centers with given weights.
    max_iter : int, default: 300
        Maximum number of iterations of the k-means algorithm for a
        single run.
    n_init : int, default: 10
        Number of time the k-means algorithm will be run with different
        centroid seeds. The final results will be the best output of
        n_init consecutive runs in terms of inertia.
    init: (string) one of
        random-class [default]: random class assignment & centroid computation
        k-means++ : uses sklearn k-means++ initialization algorithm
        spherical-k-means : use centroids from one pass of spherical k-means
        random : random unit norm vectors
        random-orthonormal : random orthonormal vectors
        If an ndarray is passed, it should be of shape (n_clusters, n_features)
        and gives the initial centers.
    tol : float, default: 1e-6
        Relative tolerance with regards to inertia to declare convergence
    n_jobs : int
        The number of jobs to use for the computation. This works by computing
        each of the n_init runs in parallel.
        If -1 all CPUs are used. If 1 is given, no parallel computing code is
        used at all, which is useful for debugging. For n_jobs below -1,
        (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
        are used.
    random_state : integer or numpy.RandomState, optional
        The generator used to initialize the centers. If an integer is
        given, it fixes the seed. Defaults to the global numpy random
        number generator.
    verbose : int, default 0
        Verbosity mode.
    copy_x : boolean, default True
        When pre-computing distances it is more numerically accurate to center
        the data first. If copy_x is True, then the original data is not
        modified. If False, the original data is modified, and put back before
        the function returns, but small numerical differences may be introduced
        by subtracting and then adding the data mean.
    normalize : boolean, default True
        Normalize the input to have unit norm.
    Attributes
    ----------
    cluster_centers_ : array, [n_clusters, n_features]
        Coordinates of cluster centers
    labels_ :
        Labels of each point
    inertia_ : float
        Sum of distances of samples to their closest cluster center.
    weights_ : array, [n_clusters,]
        Weights of each cluster in vMF distribution (alpha).
    concentrations_ : array [n_clusters,]
        Concentration parameter for each cluster (kappa).
        Larger values correspond to more concentrated clusters.
    posterior_ : array, [n_clusters, n_examples]
        Each column corresponds to the posterior distribution for an example.
        If posterior_type='hard' is used, there will only be one non-zero per
        column, its index corresponding to the example's cluster label.
        If posterior_type='soft' is used, this matrix will be dense and the
        column values correspond to soft clustering weights.
    """
    def __init__(
        self,
        n_clusters=5,
        posterior_type="soft",
        force_weights=None,
        n_init=10,
        n_jobs=1,
        max_iter=300,
        verbose=False,
        init="random-class",
        random_state=None,
        tol=1e-6,
        copy_x=True,
        normalize=True,
    ):
        self.n_clusters = n_clusters
        self.posterior_type = posterior_type
        self.force_weights = force_weights
        self.n_init = n_init
        self.n_jobs = n_jobs
        self.max_iter = max_iter
        self.verbose = verbose
        self.init = init
        self.random_state = random_state
        self.tol = tol
        self.copy_x = copy_x
        self.normalize = normalize
    def _check_force_weights(self):
        # Validate that user-supplied mixture weights match n_clusters.
        if self.force_weights is None:
            return
        if len(self.force_weights) != self.n_clusters:
            raise ValueError(
                (
                    "len(force_weights)={} but must equal "
                    "n_clusters={}".format(len(self.force_weights), self.n_clusters)
                )
            )
    def _check_fit_data(self, X):
        """Verify that the number of samples given is larger than k"""
        X = check_array(X, accept_sparse="csr", dtype=[np.float64, np.float32])
        n_samples, n_features = X.shape
        if X.shape[0] < self.n_clusters:
            raise ValueError(
                "n_samples=%d should be >= n_clusters=%d"
                % (X.shape[0], self.n_clusters)
            )
        # Every example must already lie on the unit sphere (tolerance 1e-4).
        for ee in range(n_samples):
            if sp.issparse(X):
                n = sp.linalg.norm(X[ee, :])
            else:
                n = np.linalg.norm(X[ee, :])
            if np.abs(n - 1.) > 1e-4:
                raise ValueError("Data l2-norm must be 1, found {}".format(n))
        return X
    def _check_test_data(self, X):
        # Same unit-norm validation as _check_fit_data, plus a feature
        # dimensionality check against the fitted centers.
        X = check_array(X, accept_sparse="csr", dtype=FLOAT_DTYPES, warn_on_dtype=True)
        n_samples, n_features = X.shape
        expected_n_features = self.cluster_centers_.shape[1]
        if not n_features == expected_n_features:
            raise ValueError(
                "Incorrect number of features. "
                "Got %d features, expected %d" % (n_features, expected_n_features)
            )
        for ee in range(n_samples):
            if sp.issparse(X):
                n = sp.linalg.norm(X[ee, :])
            else:
                n = np.linalg.norm(X[ee, :])
            if np.abs(n - 1.) > 1e-4:
                raise ValueError("Data l2-norm must be 1, found {}".format(n))
        return X
    def fit(self, X, y=None):
        """Compute mixture of von Mises Fisher clustering.
        Parameters
        ----------
        X : array-like or sparse matrix, shape=(n_samples, n_features)
        """
        # Project rows onto the unit sphere unless the caller opted out.
        if self.normalize:
            X = normalize(X)
        self._check_force_weights()
        random_state = check_random_state(self.random_state)
        X = self._check_fit_data(X)
        (
            self.cluster_centers_,
            self.labels_,
            self.inertia_,
            self.weights_,
            self.concentrations_,
            self.posterior_,
        ) = movMF(
            X,
            self.n_clusters,
            posterior_type=self.posterior_type,
            force_weights=self.force_weights,
            n_init=self.n_init,
            n_jobs=self.n_jobs,
            max_iter=self.max_iter,
            verbose=self.verbose,
            init=self.init,
            random_state=random_state,
            tol=self.tol,
            copy_x=self.copy_x,
        )
        return self
    def fit_predict(self, X, y=None):
        """Compute cluster centers and predict cluster index for each sample.
        Convenience method; equivalent to calling fit(X) followed by
        predict(X).
        """
        return self.fit(X).labels_
    def fit_transform(self, X, y=None):
        """Compute clustering and transform X to cluster-distance space.
        Equivalent to fit(X).transform(X), but more efficiently implemented.
        """
        # Currently, this just skips a copy of the data if it is not in
        # np.array or CSR format already.
        # XXX This skips _check_test_data, which may change the dtype;
        # we should refactor the input validation.
        return self.fit(X)._transform(X)
    def transform(self, X, y=None):
        """Transform X to a cluster-distance space.
        In the new space, each dimension is the cosine distance to the cluster
        centers. Note that even if X is sparse, the array returned by
        `transform` will typically be dense.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            New data to transform.
        Returns
        -------
        X_new : array, shape [n_samples, k]
            X transformed in the new space.
        """
        if self.normalize:
            X = normalize(X)
        check_is_fitted(self, "cluster_centers_")
        X = self._check_test_data(X)
        return self._transform(X)
    def _transform(self, X):
        """guts of transform method; no input validation"""
        return cosine_distances(X, self.cluster_centers_)
    def predict(self, X):
        """Predict the closest cluster each sample in X belongs to.
        In the vector quantization literature, `cluster_centers_` is called
        the code book and each value returned by `predict` is the index of
        the closest code in the code book.
        Note: Does not check that each point is on the sphere.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            New data to predict.
        Returns
        -------
        labels : array, shape [n_samples,]
            Index of the cluster each sample belongs to.
        """
        if self.normalize:
            X = normalize(X)
        check_is_fitted(self, "cluster_centers_")
        X = self._check_test_data(X)
        return _labels_inertia(X, self.cluster_centers_)[0]
    def score(self, X, y=None):
        """Inertia score (sum of all distances to closest cluster).
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            New data.
        Returns
        -------
        score : float
            Larger score is better.
        """
        if self.normalize:
            X = normalize(X)
        check_is_fitted(self, "cluster_centers_")
        X = self._check_test_data(X)
        # Negated inertia so that "larger is better", per sklearn convention.
        return -_labels_inertia(X, self.cluster_centers_)[1]
    def log_likelihood(self, X):
        # NOTE(review): returns the [n_clusters, n_examples] posterior
        # (responsibility) matrix for X, not a scalar log-likelihood.
        check_is_fitted(self, "cluster_centers_")
        return _log_likelihood(
            X, self.cluster_centers_, self.weights_, self.concentrations_
        )
|
jasonlaska/spherecluster | spherecluster/von_mises_fisher_mixture.py | _init_unit_centers | python | def _init_unit_centers(X, n_clusters, random_state, init):
n_examples, n_features = np.shape(X)
if isinstance(init, np.ndarray):
n_init_clusters, n_init_features = init.shape
assert n_init_clusters == n_clusters
assert n_init_features == n_features
# ensure unit normed centers
centers = init
for cc in range(n_clusters):
centers[cc, :] = centers[cc, :] / np.linalg.norm(centers[cc, :])
return centers
elif init == "spherical-k-means":
labels, inertia, centers, iters = spherical_kmeans._spherical_kmeans_single_lloyd(
X, n_clusters, x_squared_norms=np.ones((n_examples,)), init="k-means++"
)
return centers
elif init == "random":
centers = np.random.randn(n_clusters, n_features)
for cc in range(n_clusters):
centers[cc, :] = centers[cc, :] / np.linalg.norm(centers[cc, :])
return centers
elif init == "k-means++":
centers = _init_centroids(
X,
n_clusters,
"k-means++",
random_state=random_state,
x_squared_norms=np.ones((n_examples,)),
)
for cc in range(n_clusters):
centers[cc, :] = centers[cc, :] / np.linalg.norm(centers[cc, :])
return centers
elif init == "random-orthonormal":
centers = np.random.randn(n_clusters, n_features)
q, r = np.linalg.qr(centers.T, mode="reduced")
return q.T
elif init == "random-class":
centers = np.zeros((n_clusters, n_features))
for cc in range(n_clusters):
while np.linalg.norm(centers[cc, :]) == 0:
labels = np.random.randint(0, n_clusters, n_examples)
centers[cc, :] = X[labels == cc, :].sum(axis=0)
for cc in range(n_clusters):
centers[cc, :] = centers[cc, :] / np.linalg.norm(centers[cc, :])
return centers | Initializes unit norm centers.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
n_clusters : int, optional, default: 8
The number of clusters to form as well as the number of
centroids to generate.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
init: (string) one of
k-means++ : uses sklearn k-means++ initialization algorithm
spherical-k-means : use centroids from one pass of spherical k-means
random : random unit norm vectors
random-orthonormal : random orthonormal vectors
If an ndarray is passed, it should be of shape (n_clusters, n_features)
and gives the initial centers. | train | https://github.com/jasonlaska/spherecluster/blob/701b0b1909088a56e353b363b2672580d4fe9d93/spherecluster/von_mises_fisher_mixture.py#L171-L252 | null | import warnings
import numpy as np
import scipy.sparse as sp
from scipy.special import iv # modified Bessel function of first kind, I_v
from numpy import i0 # modified Bessel function of first kind order 0, I_0
from scipy.special import logsumexp
from sklearn.base import BaseEstimator, ClusterMixin, TransformerMixin
from sklearn.cluster.k_means_ import _init_centroids, _tolerance, _validate_center_shape
from sklearn.utils.validation import FLOAT_DTYPES
from sklearn.utils.validation import check_is_fitted
from sklearn.utils import check_array, check_random_state, as_float_array
from sklearn.preprocessing import normalize
from sklearn.utils.extmath import squared_norm
from sklearn.metrics.pairwise import cosine_distances
from sklearn.externals.joblib import Parallel, delayed
from . import spherical_kmeans
# Cap applied to kappa when a cluster collapses onto a single direction
# (rbar ~= 1), where the Banerjee et al. (2005) approximation diverges.
# (Keeps the historical "CONTENTRATION" spelling; renaming would break
# any external references to this module-level name.)
MAX_CONTENTRATION = 1e10
def _inertia_from_labels(X, centers, labels):
    """Total cosine-distance inertia for a fixed label assignment.

    Sums ``1 - <x_i, mu_{label_i}>`` over all examples, i.e. the cosine
    distance between each (unit-norm) row of X and its assigned center.
    """
    n_examples, _ = X.shape
    total = 0.0
    for row_idx in range(n_examples):
        assigned_center = centers[int(labels[row_idx]), :]
        total += 1 - X[row_idx, :].dot(assigned_center.T)
    return total
def _labels_inertia(X, centers):
    """Assign each row of X to its nearest center under cosine distance.

    Returns
    -------
    labels : array, [n_examples,]
        Index (stored as float) of the closest center per example.
    inertia : float
        Sum of cosine distances from each example to its closest center.
    """
    n_examples, _ = X.shape
    n_clusters, _ = centers.shape
    labels = np.zeros((n_examples,))
    inertia = np.zeros((n_examples,))
    for row_idx in range(n_examples):
        dists = np.array(
            [1 - X[row_idx, :].dot(centers[k, :].T) for k in range(n_clusters)]
        )
        best = np.argmin(dists)
        labels[row_idx] = best
        inertia[row_idx] = dists[best]
    return labels, np.sum(inertia)
def _vmf_log(X, kappa, mu):
    """Log-density of the von Mises-Fisher distribution at each row of X.

    Uses the exact normalization constant (scipy/numpy Bessel functions),
    so it is only numerically safe for small ``kappa`` and dimensionality.
    """
    _, dim = X.shape
    density = _vmf_normalize(kappa, dim) * np.exp(kappa * X.dot(mu).T)
    return np.log(density)
def _vmf_normalize(kappa, dim):
    """Normalization constant C_dim(kappa) of the vMF density.

    C_dim(kappa) = kappa^(dim/2 - 1) / ((2 pi)^(dim/2) * I_(dim/2-1)(kappa)),
    evaluated with numpy/scipy Bessel functions (accurate for small kappa).

    Raises
    ------
    ValueError
        If the numerator or denominator overflows to inf, or the
        denominator underflows to (numerically) zero.
    """
    order = dim / 2. - 1.
    numerator = np.power(kappa, order)
    # numpy ships a dedicated, cheaper implementation for order 0.
    bessel = i0(kappa) if order < 1e-15 else iv(order, kappa)
    denominator = np.power(2. * np.pi, dim / 2.) * bessel
    if np.isinf(numerator):
        raise ValueError("VMF scaling numerator was inf.")
    if np.isinf(denominator):
        raise ValueError("VMF scaling denominator was inf.")
    if np.abs(denominator) < 1e-15:
        raise ValueError("VMF scaling denominator was 0.")
    return numerator / denominator
def _log_H_asymptotic(nu, kappa):
    """Amos-type upper-bound asymptotic approximation of log(H_nu)(kappa),
    where log(H_nu)(kappa) = integral_0^kappa R_nu(t) dt.

    Mirrors "lH_asymptotic <-" in movMF.R from the movMF R package.
    """
    beta = np.sqrt((nu + 0.5) ** 2)
    # Clip kappa for the correction term, as in the R reference code.
    threshold = np.sqrt((3. * nu + 11. / 2.) * (nu + 3. / 2.))
    kappa_low = np.min([kappa, threshold])
    correction = _S(kappa_low, nu, nu + 2.) - _S(kappa_low, nu + 0.5, beta)
    return _S(kappa, nu + 0.5, beta) + correction
def _S(kappa, alpha, beta):
    """Antiderivative of the Amos-type bound G on the modified Bessel
    function ratio.

    Handles scalar arguments only.  Mirrors "S <-" in movMF.R from the
    movMF R package.
    """
    kappa = np.abs(kappa) * 1.
    alpha = alpha * 1.
    beta = np.abs(beta) * 1.
    root = np.sqrt(kappa ** 2 + beta ** 2)
    if alpha == 0:
        scaled = 0
    else:
        scaled = alpha * np.log((alpha + root) / (alpha + beta))
    return root - beta - scaled
def _vmf_log_asymptotic(X, kappa, mu):
    """Compute log(f(x|theta)) via Amos approximation
        log(f(x|theta)) = theta' x - log(H_{d/2-1})(\|theta\|)
    where theta = kappa * X, \|theta\| = kappa.
    Compared with _vmf_log, this helps with numerical stability / loss of
    precision for large values of kappa and n_features.
    See utility function implementation notes in movMF.R from
    https://cran.r-project.org/web/packages/movMF/index.html
    """
    n_examples, n_features = X.shape
    # theta' x = kappa * <x, mu>; subtract the Amos-type approximation of
    # the log-normalizer log(H_{d/2-1})(kappa).
    log_vfm = kappa * X.dot(mu).T + -_log_H_asymptotic(n_features / 2. - 1., kappa)
    return log_vfm
def _log_likelihood(X, centers, weights, concentrations):
    # NOTE(review): despite the name, this returns the posterior
    # responsibility matrix, shape [n_clusters, n_examples] -- the same
    # soft-assignment computation as _expectation(posterior_type="soft").
    # Promote a single example (1-D input) to a 1 x n_features matrix.
    if len(np.shape(X)) != 2:
        X = X.reshape((1, len(X)))
    n_examples, n_features = np.shape(X)
    n_clusters, _ = centers.shape
    # The exact vMF log-density overflows in high dimension; switch to the
    # Amos-type asymptotic approximation beyond ~50 features.
    if n_features <= 50:  # works up to about 50 before numrically unstable
        vmf_f = _vmf_log
    else:
        vmf_f = _vmf_log_asymptotic
    f_log = np.zeros((n_clusters, n_examples))
    for cc in range(n_clusters):
        f_log[cc, :] = vmf_f(X, concentrations[cc], centers[cc, :])
    posterior = np.zeros((n_clusters, n_examples))
    weights_log = np.log(weights)
    # log(alpha_c) + log f_c(x), then normalize per example in log-space.
    posterior = np.tile(weights_log.T, (n_examples, 1)).T + f_log
    for ee in range(n_examples):
        posterior[:, ee] = np.exp(posterior[:, ee] - logsumexp(posterior[:, ee]))
    return posterior
def _expectation(X, centers, weights, concentrations, posterior_type="soft"):
    """Compute the log-likelihood of each datapoint being in each cluster.

    Parameters
    ----------
    X : array-like or sparse matrix, shape=(n_examples, n_features)
        Unit-norm observations.
    centers (mu) : array, [n_centers x n_features]
    weights (alpha) : array, [n_centers, ] (alpha)
    concentrations (kappa) : array, [n_centers, ]
    posterior_type : 'soft' or 'hard'
        'soft' yields normalized responsibilities; 'hard' a one-hot
        assignment to the a-posteriori most likely cluster.

    Returns
    ----------
    posterior : array, [n_centers, n_examples]
    """
    n_examples, n_features = np.shape(X)
    n_clusters, _ = centers.shape
    # The exact vMF log-density overflows in high dimension; switch to the
    # Amos-type asymptotic approximation beyond ~50 features.
    if n_features <= 50:  # works up to about 50 before numrically unstable
        vmf_f = _vmf_log
    else:
        vmf_f = _vmf_log_asymptotic
    f_log = np.zeros((n_clusters, n_examples))
    for cc in range(n_clusters):
        f_log[cc, :] = vmf_f(X, concentrations[cc], centers[cc, :])
    posterior = np.zeros((n_clusters, n_examples))
    if posterior_type == "soft":
        weights_log = np.log(weights)
        # log(alpha_c) + log f_c(x), normalized per example via logsumexp.
        posterior = np.tile(weights_log.T, (n_examples, 1)).T + f_log
        for ee in range(n_examples):
            posterior[:, ee] = np.exp(posterior[:, ee] - logsumexp(posterior[:, ee]))
    elif posterior_type == "hard":
        weights_log = np.log(weights)
        # One-hot column per example at the argmax of the weighted density.
        weighted_f_log = np.tile(weights_log.T, (n_examples, 1)).T + f_log
        for ee in range(n_examples):
            posterior[np.argmax(weighted_f_log[:, ee]), ee] = 1.0
    return posterior
def _maximization(X, posterior, force_weights=None):
    """Estimate new centers, weights, and concentrations from the posterior.

    Parameters
    ----------
    posterior : array, [n_centers, n_examples]
        The posterior matrix from the expectation step.
    force_weights : None or array, [n_centers, ]
        If None is passed, will estimate weights.
        If an array is passed, will use instead of estimating.

    Returns
    ----------
    centers (mu) : array, [n_centers x n_features]
    weights (alpha) : array, [n_centers, ] (alpha)
    concentrations (kappa) : array, [n_centers, ]
    """
    n_examples, n_features = X.shape
    n_clusters, n_examples = posterior.shape
    concentrations = np.zeros((n_clusters,))
    centers = np.zeros((n_clusters, n_features))
    if force_weights is None:
        weights = np.zeros((n_clusters,))
    for cc in range(n_clusters):
        # update weights (alpha)
        if force_weights is None:
            weights[cc] = np.mean(posterior[cc, :])
        else:
            weights = force_weights
        # update centers (mu): posterior-weighted sum of examples
        X_scaled = X.copy()
        if sp.issparse(X):
            # CSR layout: repeat each row's posterior weight over that
            # row's nonzeros (np.diff(indptr) gives nonzeros per row)
            X_scaled.data *= posterior[cc, :].repeat(np.diff(X_scaled.indptr))
        else:
            for ee in range(n_examples):
                X_scaled[ee, :] *= posterior[cc, ee]
        centers[cc, :] = X_scaled.sum(axis=0)
        # normalize centers back onto the unit sphere
        center_norm = np.linalg.norm(centers[cc, :])
        if center_norm > 1e-8:
            centers[cc, :] = centers[cc, :] / center_norm
        # update concentration (kappa) via the Banerjee et al. 2005
        # approximation [TODO: add other kappa approximations]
        rbar = center_norm / (n_examples * weights[cc])
        concentrations[cc] = rbar * n_features - np.power(rbar, 3.)
        if np.abs(rbar - 1.0) < 1e-10:
            # rbar -> 1 makes the denominator below vanish; clamp kappa
            concentrations[cc] = MAX_CONTENTRATION
        else:
            concentrations[cc] /= 1. - np.power(rbar, 2.)
        # let python know we can free this (good for large dense X)
        del X_scaled
    return centers, weights, concentrations
def _movMF(
    X,
    n_clusters,
    posterior_type="soft",
    force_weights=None,
    max_iter=300,
    verbose=False,
    init="random-class",
    random_state=None,
    tol=1e-6,
):
    """Single EM run of mixture-of-von-Mises-Fisher clustering.

    Implements the algorithms (i) and (ii) from
    "Clustering on the Unit Hypersphere using von Mises-Fisher Distributions"
    by Banerjee, Dhillon, Ghosh, and Sra.

    TODO: Currently only supports Banerjee et al 2005 approximation of kappa,
    however, there are numerous other approximations see _update_params.

    Attribution
    ----------
    Approximation of log-vmf distribution function from movMF R-package.
    movMF: An R Package for Fitting Mixtures of von Mises-Fisher Distributions
    by Kurt Hornik, Bettina Grun, 2014
    Find more at:
        https://cran.r-project.org/web/packages/movMF/vignettes/movMF.pdf
        https://cran.r-project.org/web/packages/movMF/index.html

    Parameters
    ----------
    X : array-like or sparse matrix, shape=(n_samples, n_features)
        Unit-normed input rows.
    n_clusters : int
        Number of mixture components.
    posterior_type : 'soft' or 'hard'
        Type of posterior computed in the expectation step.
    force_weights : None or array [n_clusters, ]
        If None, the algorithm will estimate the weights.
        If an array of weights, algorithm will estimate concentrations and
        centers with given weights.
    max_iter : int, default: 300
        Maximum number of EM iterations for this single run.
    verbose : bool, default False
        Print progress messages.
    init : string or ndarray
        Center initialization scheme; see `_init_unit_centers`.
    random_state : integer or numpy.RandomState, optional
        Seed / generator used for center initialization.
    tol : float, default: 1e-6
        Convergence tolerance on the squared center shift.

    Returns
    -------
    centers, weights, concentrations, posterior, labels, inertia
    """
    random_state = check_random_state(random_state)
    n_examples, n_features = np.shape(X)

    # init centers (mus)
    centers = _init_unit_centers(X, n_clusters, random_state, init)

    # init weights (alphas): uniform unless fixed by the caller
    if force_weights is None:
        weights = np.ones((n_clusters,))
        weights = weights / np.sum(weights)
    else:
        weights = force_weights

    # init concentrations (kappas)
    concentrations = np.ones((n_clusters,))

    if verbose:
        print("Initialization complete")

    # `iteration` (renamed from `iter`, which shadowed the builtin)
    for iteration in range(max_iter):
        centers_prev = centers.copy()

        # expectation step
        posterior = _expectation(
            X, centers, weights, concentrations, posterior_type=posterior_type
        )

        # maximization step
        centers, weights, concentrations = _maximization(
            X, posterior, force_weights=force_weights
        )

        # check convergence: squared shift of all centers
        tolcheck = squared_norm(centers_prev - centers)
        if tolcheck <= tol:
            if verbose:
                print(
                    "Converged at iteration %d: "
                    "center shift %e within tolerance %e" % (iteration, tolcheck, tol)
                )
            break

    # labels come for free via posterior
    labels = np.zeros((n_examples,))
    for ee in range(n_examples):
        labels[ee] = np.argmax(posterior[:, ee])

    inertia = _inertia_from_labels(X, centers, labels)

    return centers, weights, concentrations, posterior, labels, inertia
def movMF(
    X,
    n_clusters,
    posterior_type="soft",
    force_weights=None,
    n_init=10,
    n_jobs=1,
    max_iter=300,
    verbose=False,
    init="random-class",
    random_state=None,
    tol=1e-6,
    copy_x=True,
):
    """Wrapper for parallelization of _movMF and running n_init times.

    Runs `_movMF` `n_init` times (serially when n_jobs == 1, otherwise via
    joblib) and returns the run with the lowest inertia. See `_movMF` for
    parameter semantics.
    """
    if n_init <= 0:
        raise ValueError(
            "Invalid number of initializations."
            " n_init=%d must be bigger than zero." % n_init
        )
    random_state = check_random_state(random_state)

    if max_iter <= 0:
        raise ValueError(
            "Number of iterations should be a positive number,"
            " got %d instead" % max_iter
        )

    X = as_float_array(X, copy=copy_x)
    tol = _tolerance(X, tol)

    if hasattr(init, "__array__"):
        init = check_array(init, dtype=X.dtype.type, copy=True)
        _validate_center_shape(X, n_clusters, init)

        if n_init != 1:
            warnings.warn(
                "Explicit initial center position passed: "
                "performing only one init in k-means instead of n_init=%d" % n_init,
                RuntimeWarning,
                stacklevel=2,
            )
            n_init = 1

    # defaults (removed the dead `best_inertia = np.infty` that was
    # unconditionally overwritten here)
    best_centers = None
    best_labels = None
    best_weights = None
    best_concentrations = None
    best_posterior = None
    best_inertia = None

    if n_jobs == 1:
        # For a single thread, less memory is needed if we just store one set
        # of the best results (as opposed to one set per run per thread).
        for it in range(n_init):
            # cluster on the sphere
            (centers, weights, concentrations, posterior, labels, inertia) = _movMF(
                X,
                n_clusters,
                posterior_type=posterior_type,
                force_weights=force_weights,
                max_iter=max_iter,
                verbose=verbose,
                init=init,
                random_state=random_state,
                tol=tol,
            )

            # determine if these results are the best so far
            if best_inertia is None or inertia < best_inertia:
                best_centers = centers.copy()
                best_labels = labels.copy()
                best_weights = weights.copy()
                best_concentrations = concentrations.copy()
                best_posterior = posterior.copy()
                best_inertia = inertia
    else:
        # Parallelisation of movMF runs. BUGFIX: each run now receives its
        # own seed; previously the shared `random_state` object was passed
        # and `seed` was unused, so every parallel run used an identical
        # initialization, defeating n_init.
        seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init)
        results = Parallel(n_jobs=n_jobs, verbose=0)(
            delayed(_movMF)(
                X,
                n_clusters,
                posterior_type=posterior_type,
                force_weights=force_weights,
                max_iter=max_iter,
                verbose=verbose,
                init=init,
                random_state=seed,
                tol=tol,
            )
            for seed in seeds
        )

        # Get results with the lowest inertia
        centers, weights, concentrations, posteriors, labels, inertia = zip(*results)
        best = np.argmin(inertia)
        best_labels = labels[best]
        best_inertia = inertia[best]
        best_centers = centers[best]
        best_concentrations = concentrations[best]
        best_posterior = posteriors[best]
        best_weights = weights[best]

    return (
        best_centers,
        best_labels,
        best_inertia,
        best_weights,
        best_concentrations,
        best_posterior,
    )
class VonMisesFisherMixture(BaseEstimator, ClusterMixin, TransformerMixin):
    """Estimator for Mixture of von Mises Fisher clustering on the unit sphere.

    Implements the algorithms (i) and (ii) from
    "Clustering on the Unit Hypersphere using von Mises-Fisher Distributions"
    by Banerjee, Dhillon, Ghosh, and Sra.

    TODO: Currently only supports Banerjee et al 2005 approximation of kappa,
    however, there are numerous other approximations see _update_params.

    Attribution
    ----------
    Approximation of log-vmf distribution function from movMF R-package.
    movMF: An R Package for Fitting Mixtures of von Mises-Fisher Distributions
    by Kurt Hornik, Bettina Grun, 2014

    Find more at:
        https://cran.r-project.org/web/packages/movMF/vignettes/movMF.pdf
        https://cran.r-project.org/web/packages/movMF/index.html

    Basic sklearn scaffolding from sklearn.cluster.KMeans.

    Parameters
    ----------
    n_clusters : int, optional, default: 8
        The number of clusters to form as well as the number of
        centroids to generate.

    posterior_type: 'soft' or 'hard'
        Type of posterior computed in exepectation step.
        See note about attribute: self.posterior_

    force_weights : None or array [n_clusters, ]
        If None, the algorithm will estimate the weights.
        If an array of weights, algorithm will estimate concentrations and
        centers with given weights.

    max_iter : int, default: 300
        Maximum number of iterations of the k-means algorithm for a
        single run.

    n_init : int, default: 10
        Number of time the k-means algorithm will be run with different
        centroid seeds. The final results will be the best output of
        n_init consecutive runs in terms of inertia.

    init: (string) one of
        random-class [default]: random class assignment & centroid computation
        k-means++ : uses sklearn k-means++ initialization algorithm
        spherical-k-means : use centroids from one pass of spherical k-means
        random : random unit norm vectors
        random-orthonormal : random orthonormal vectors
        If an ndarray is passed, it should be of shape (n_clusters, n_features)
        and gives the initial centers.

    tol : float, default: 1e-6
        Relative tolerance with regards to inertia to declare convergence

    n_jobs : int
        The number of jobs to use for the computation. This works by computing
        each of the n_init runs in parallel.
        If -1 all CPUs are used. If 1 is given, no parallel computing code is
        used at all, which is useful for debugging. For n_jobs below -1,
        (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
        are used.

    random_state : integer or numpy.RandomState, optional
        The generator used to initialize the centers. If an integer is
        given, it fixes the seed. Defaults to the global numpy random
        number generator.

    verbose : int, default 0
        Verbosity mode.

    copy_x : boolean, default True
        When pre-computing distances it is more numerically accurate to center
        the data first. If copy_x is True, then the original data is not
        modified. If False, the original data is modified, and put back before
        the function returns, but small numerical differences may be introduced
        by subtracting and then adding the data mean.

    normalize : boolean, default True
        Normalize the input to have unnit norm.

    Attributes
    ----------
    cluster_centers_ : array, [n_clusters, n_features]
        Coordinates of cluster centers

    labels_ :
        Labels of each point

    inertia_ : float
        Sum of distances of samples to their closest cluster center.

    weights_ : array, [n_clusters,]
        Weights of each cluster in vMF distribution (alpha).

    concentrations_ : array [n_clusters,]
        Concentration parameter for each cluster (kappa).
        Larger values correspond to more concentrated clusters.

    posterior_ : array, [n_clusters, n_examples]
        Each column corresponds to the posterio distribution for and example.
        If posterior_type='hard' is used, there will only be one non-zero per
        column, its index corresponding to the example's cluster label.
        If posterior_type='soft' is used, this matrix will be dense and the
        column values correspond to soft clustering weights.
    """

    def __init__(
        self,
        n_clusters=5,
        posterior_type="soft",
        force_weights=None,
        n_init=10,
        n_jobs=1,
        max_iter=300,
        verbose=False,
        init="random-class",
        random_state=None,
        tol=1e-6,
        copy_x=True,
        normalize=True,
    ):
        # sklearn convention: __init__ only stores hyper-parameters verbatim;
        # all validation happens in fit().
        self.n_clusters = n_clusters
        self.posterior_type = posterior_type
        self.force_weights = force_weights
        self.n_init = n_init
        self.n_jobs = n_jobs
        self.max_iter = max_iter
        self.verbose = verbose
        self.init = init
        self.random_state = random_state
        self.tol = tol
        self.copy_x = copy_x
        self.normalize = normalize

    def _check_force_weights(self):
        """Validate that user-supplied fixed weights match n_clusters."""
        if self.force_weights is None:
            return

        if len(self.force_weights) != self.n_clusters:
            raise ValueError(
                (
                    "len(force_weights)={} but must equal "
                    "n_clusters={}".format(len(self.force_weights), self.n_clusters)
                )
            )

    def _check_fit_data(self, X):
        """Verify that the number of samples given is larger than k"""
        X = check_array(X, accept_sparse="csr", dtype=[np.float64, np.float32])
        n_samples, n_features = X.shape
        if X.shape[0] < self.n_clusters:
            raise ValueError(
                "n_samples=%d should be >= n_clusters=%d"
                % (X.shape[0], self.n_clusters)
            )

        # Every row must already lie on the unit sphere.
        for ee in range(n_samples):
            if sp.issparse(X):
                n = sp.linalg.norm(X[ee, :])
            else:
                n = np.linalg.norm(X[ee, :])

            if np.abs(n - 1.) > 1e-4:
                raise ValueError("Data l2-norm must be 1, found {}".format(n))

        return X

    def _check_test_data(self, X):
        """Validate prediction-time input: same feature count, unit-normed rows."""
        X = check_array(X, accept_sparse="csr", dtype=FLOAT_DTYPES, warn_on_dtype=True)
        n_samples, n_features = X.shape
        expected_n_features = self.cluster_centers_.shape[1]
        if not n_features == expected_n_features:
            raise ValueError(
                "Incorrect number of features. "
                "Got %d features, expected %d" % (n_features, expected_n_features)
            )

        for ee in range(n_samples):
            if sp.issparse(X):
                n = sp.linalg.norm(X[ee, :])
            else:
                n = np.linalg.norm(X[ee, :])

            if np.abs(n - 1.) > 1e-4:
                raise ValueError("Data l2-norm must be 1, found {}".format(n))

        return X

    def fit(self, X, y=None):
        """Compute mixture of von Mises Fisher clustering.

        Parameters
        ----------
        X : array-like or sparse matrix, shape=(n_samples, n_features)
        """
        if self.normalize:
            X = normalize(X)

        self._check_force_weights()
        random_state = check_random_state(self.random_state)
        X = self._check_fit_data(X)

        (
            self.cluster_centers_,
            self.labels_,
            self.inertia_,
            self.weights_,
            self.concentrations_,
            self.posterior_,
        ) = movMF(
            X,
            self.n_clusters,
            posterior_type=self.posterior_type,
            force_weights=self.force_weights,
            n_init=self.n_init,
            n_jobs=self.n_jobs,
            max_iter=self.max_iter,
            verbose=self.verbose,
            init=self.init,
            random_state=random_state,
            tol=self.tol,
            copy_x=self.copy_x,
        )

        return self

    def fit_predict(self, X, y=None):
        """Compute cluster centers and predict cluster index for each sample.

        Convenience method; equivalent to calling fit(X) followed by
        predict(X).
        """
        return self.fit(X).labels_

    def fit_transform(self, X, y=None):
        """Compute clustering and transform X to cluster-distance space.

        Equivalent to fit(X).transform(X), but more efficiently implemented.
        """
        # Currently, this just skips a copy of the data if it is not in
        # np.array or CSR format already.
        # XXX This skips _check_test_data, which may change the dtype;
        # we should refactor the input validation.
        return self.fit(X)._transform(X)

    def transform(self, X, y=None):
        """Transform X to a cluster-distance space.

        In the new space, each dimension is the cosine distance to the cluster
        centers. Note that even if X is sparse, the array returned by
        `transform` will typically be dense.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            New data to transform.

        Returns
        -------
        X_new : array, shape [n_samples, k]
            X transformed in the new space.
        """
        if self.normalize:
            X = normalize(X)

        check_is_fitted(self, "cluster_centers_")
        X = self._check_test_data(X)
        return self._transform(X)

    def _transform(self, X):
        """guts of transform method; no input validation"""
        return cosine_distances(X, self.cluster_centers_)

    def predict(self, X):
        """Predict the closest cluster each sample in X belongs to.

        In the vector quantization literature, `cluster_centers_` is called
        the code book and each value returned by `predict` is the index of
        the closest code in the code book.

        Note: Does not check that each point is on the sphere.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            New data to predict.

        Returns
        -------
        labels : array, shape [n_samples,]
            Index of the cluster each sample belongs to.
        """
        if self.normalize:
            X = normalize(X)

        check_is_fitted(self, "cluster_centers_")
        X = self._check_test_data(X)
        return _labels_inertia(X, self.cluster_centers_)[0]

    def score(self, X, y=None):
        """Inertia score (sum of all distances to closest cluster).

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            New data.

        Returns
        -------
        score : float
            Larger score is better.
        """
        if self.normalize:
            X = normalize(X)

        check_is_fitted(self, "cluster_centers_")
        X = self._check_test_data(X)
        # Negated so that a larger score is better, per sklearn convention.
        return -_labels_inertia(X, self.cluster_centers_)[1]

    def log_likelihood(self, X):
        # NOTE(review): despite the name, the module-level _log_likelihood
        # returns per-cluster posterior probabilities for each example,
        # not a scalar log-likelihood.
        check_is_fitted(self, "cluster_centers_")
        return _log_likelihood(
            X, self.cluster_centers_, self.weights_, self.concentrations_
        )
|
jasonlaska/spherecluster | spherecluster/von_mises_fisher_mixture.py | _expectation | python | def _expectation(X, centers, weights, concentrations, posterior_type="soft"):
n_examples, n_features = np.shape(X)
n_clusters, _ = centers.shape
if n_features <= 50: # works up to about 50 before numrically unstable
vmf_f = _vmf_log
else:
vmf_f = _vmf_log_asymptotic
f_log = np.zeros((n_clusters, n_examples))
for cc in range(n_clusters):
f_log[cc, :] = vmf_f(X, concentrations[cc], centers[cc, :])
posterior = np.zeros((n_clusters, n_examples))
if posterior_type == "soft":
weights_log = np.log(weights)
posterior = np.tile(weights_log.T, (n_examples, 1)).T + f_log
for ee in range(n_examples):
posterior[:, ee] = np.exp(posterior[:, ee] - logsumexp(posterior[:, ee]))
elif posterior_type == "hard":
weights_log = np.log(weights)
weighted_f_log = np.tile(weights_log.T, (n_examples, 1)).T + f_log
for ee in range(n_examples):
posterior[np.argmax(weighted_f_log[:, ee]), ee] = 1.0
return posterior | Compute the log-likelihood of each datapoint being in each cluster.
Parameters
----------
centers (mu) : array, [n_centers x n_features]
weights (alpha) : array, [n_centers, ] (alpha)
concentrations (kappa) : array, [n_centers, ]
Returns
----------
posterior : array, [n_centers, n_examples] | train | https://github.com/jasonlaska/spherecluster/blob/701b0b1909088a56e353b363b2672580d4fe9d93/spherecluster/von_mises_fisher_mixture.py#L255-L293 | null | import warnings
import numpy as np
import scipy.sparse as sp
from scipy.special import iv # modified Bessel function of first kind, I_v
from numpy import i0 # modified Bessel function of first kind order 0, I_0
from scipy.special import logsumexp
from sklearn.base import BaseEstimator, ClusterMixin, TransformerMixin
from sklearn.cluster.k_means_ import _init_centroids, _tolerance, _validate_center_shape
from sklearn.utils.validation import FLOAT_DTYPES
from sklearn.utils.validation import check_is_fitted
from sklearn.utils import check_array, check_random_state, as_float_array
from sklearn.preprocessing import normalize
from sklearn.utils.extmath import squared_norm
from sklearn.metrics.pairwise import cosine_distances
from sklearn.externals.joblib import Parallel, delayed
from . import spherical_kmeans
MAX_CONTENTRATION = 1e10
def _inertia_from_labels(X, centers, labels):
"""Compute inertia with cosine distance using known labels.
"""
n_examples, n_features = X.shape
inertia = np.zeros((n_examples,))
for ee in range(n_examples):
inertia[ee] = 1 - X[ee, :].dot(centers[int(labels[ee]), :].T)
return np.sum(inertia)
def _labels_inertia(X, centers):
"""Compute labels and inertia with cosine distance.
"""
n_examples, n_features = X.shape
n_clusters, n_features = centers.shape
labels = np.zeros((n_examples,))
inertia = np.zeros((n_examples,))
for ee in range(n_examples):
dists = np.zeros((n_clusters,))
for cc in range(n_clusters):
dists[cc] = 1 - X[ee, :].dot(centers[cc, :].T)
labels[ee] = np.argmin(dists)
inertia[ee] = dists[int(labels[ee])]
return labels, np.sum(inertia)
def _vmf_log(X, kappa, mu):
    """Log of the vMF density at the rows of X, using built-in numpy/scipy
    Bessel approximations.

    Works well on small kappa and mu.
    """
    _, dim = X.shape
    density = _vmf_normalize(kappa, dim) * np.exp(kappa * X.dot(mu).T)
    return np.log(density)
def _vmf_normalize(kappa, dim):
"""Compute normalization constant using built-in numpy/scipy Bessel
approximations.
Works well on small kappa and mu.
"""
num = np.power(kappa, dim / 2. - 1.)
if dim / 2. - 1. < 1e-15:
denom = np.power(2. * np.pi, dim / 2.) * i0(kappa)
else:
denom = np.power(2. * np.pi, dim / 2.) * iv(dim / 2. - 1., kappa)
if np.isinf(num):
raise ValueError("VMF scaling numerator was inf.")
if np.isinf(denom):
raise ValueError("VMF scaling denominator was inf.")
if np.abs(denom) < 1e-15:
raise ValueError("VMF scaling denominator was 0.")
return num / denom
def _log_H_asymptotic(nu, kappa):
    """Amos-type upper-bound asymptotic approximation on H, where
    log(H_nu)(kappa) is the integral of R_nu(t) dt from 0 to kappa.

    See "lH_asymptotic <-" in movMF.R and utility function implementation
    notes from https://cran.r-project.org/web/packages/movMF/index.html
    """
    beta = np.sqrt((nu + 0.5) ** 2)
    cutoff = np.sqrt((3. * nu + 11. / 2.) * (nu + 3. / 2.))
    kappa_l = np.min([kappa, cutoff])
    correction = _S(kappa_l, nu, nu + 2.) - _S(kappa_l, nu + 0.5, beta)
    return _S(kappa, nu + 0.5, beta) + correction
def _S(kappa, alpha, beta):
"""Compute the antiderivative of the Amos-type bound G on the modified
Bessel function ratio.
Note: Handles scalar kappa, alpha, and beta only.
See "S <-" in movMF.R and utility function implementation notes from
https://cran.r-project.org/web/packages/movMF/index.html
"""
kappa = 1. * np.abs(kappa)
alpha = 1. * alpha
beta = 1. * np.abs(beta)
a_plus_b = alpha + beta
u = np.sqrt(kappa ** 2 + beta ** 2)
if alpha == 0:
alpha_scale = 0
else:
alpha_scale = alpha * np.log((alpha + u) / a_plus_b)
return u - beta - alpha_scale
def _vmf_log_asymptotic(X, kappa, mu):
    """Approximate log(f(x|theta)) = theta'x - log(H_{d/2-1})(kappa) via the
    Amos-type bound, where theta = kappa * mu and ||theta|| = kappa.

    More numerically stable than _vmf_log for large kappa and n_features.
    See utility function implementation notes in movMF.R from
    https://cran.r-project.org/web/packages/movMF/index.html
    """
    n_features = X.shape[1]
    log_norm = _log_H_asymptotic(n_features / 2. - 1., kappa)
    return kappa * X.dot(mu).T - log_norm
def _log_likelihood(X, centers, weights, concentrations):
    """Return the posterior cluster probabilities for each example.

    Parameters
    ----------
    X : array, [n_examples, n_features] (a 1-D example is promoted to a row)
    centers (mu) : array, [n_centers, n_features]
    weights (alpha) : array, [n_centers, ]
    concentrations (kappa) : array, [n_centers, ]

    Returns
    ----------
    posterior : array, [n_centers, n_examples]
    """
    # Promote a single 1-D example to a 1 x n_features matrix.
    if len(np.shape(X)) != 2:
        X = X.reshape((1, len(X)))
    n_examples, n_features = np.shape(X)
    n_clusters, _ = centers.shape
    if n_features <= 50:  # works up to about 50 before numerically unstable
        vmf_f = _vmf_log
    else:
        vmf_f = _vmf_log_asymptotic
    f_log = np.zeros((n_clusters, n_examples))
    for cc in range(n_clusters):
        f_log[cc, :] = vmf_f(X, concentrations[cc], centers[cc, :])
    # NOTE(review): this zeros init is immediately overwritten below.
    posterior = np.zeros((n_clusters, n_examples))
    # log prior + log likelihood, normalized per example via logsumexp.
    weights_log = np.log(weights)
    posterior = np.tile(weights_log.T, (n_examples, 1)).T + f_log
    for ee in range(n_examples):
        posterior[:, ee] = np.exp(posterior[:, ee] - logsumexp(posterior[:, ee]))
    return posterior
def _init_unit_centers(X, n_clusters, random_state, init):
"""Initializes unit norm centers.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
n_clusters : int, optional, default: 8
The number of clusters to form as well as the number of
centroids to generate.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
init: (string) one of
k-means++ : uses sklearn k-means++ initialization algorithm
spherical-k-means : use centroids from one pass of spherical k-means
random : random unit norm vectors
random-orthonormal : random orthonormal vectors
If an ndarray is passed, it should be of shape (n_clusters, n_features)
and gives the initial centers.
"""
n_examples, n_features = np.shape(X)
if isinstance(init, np.ndarray):
n_init_clusters, n_init_features = init.shape
assert n_init_clusters == n_clusters
assert n_init_features == n_features
# ensure unit normed centers
centers = init
for cc in range(n_clusters):
centers[cc, :] = centers[cc, :] / np.linalg.norm(centers[cc, :])
return centers
elif init == "spherical-k-means":
labels, inertia, centers, iters = spherical_kmeans._spherical_kmeans_single_lloyd(
X, n_clusters, x_squared_norms=np.ones((n_examples,)), init="k-means++"
)
return centers
elif init == "random":
centers = np.random.randn(n_clusters, n_features)
for cc in range(n_clusters):
centers[cc, :] = centers[cc, :] / np.linalg.norm(centers[cc, :])
return centers
elif init == "k-means++":
centers = _init_centroids(
X,
n_clusters,
"k-means++",
random_state=random_state,
x_squared_norms=np.ones((n_examples,)),
)
for cc in range(n_clusters):
centers[cc, :] = centers[cc, :] / np.linalg.norm(centers[cc, :])
return centers
elif init == "random-orthonormal":
centers = np.random.randn(n_clusters, n_features)
q, r = np.linalg.qr(centers.T, mode="reduced")
return q.T
elif init == "random-class":
centers = np.zeros((n_clusters, n_features))
for cc in range(n_clusters):
while np.linalg.norm(centers[cc, :]) == 0:
labels = np.random.randint(0, n_clusters, n_examples)
centers[cc, :] = X[labels == cc, :].sum(axis=0)
for cc in range(n_clusters):
centers[cc, :] = centers[cc, :] / np.linalg.norm(centers[cc, :])
return centers
def _maximization(X, posterior, force_weights=None):
    """Estimate new centers, weights, and concentrations from the posterior.

    Parameters
    ----------
    posterior : array, [n_centers, n_examples]
        The posterior matrix from the expectation step.
    force_weights : None or array, [n_centers, ]
        If None is passed, will estimate weights.
        If an array is passed, will use instead of estimating.

    Returns
    ----------
    centers (mu) : array, [n_centers x n_features]
    weights (alpha) : array, [n_centers, ] (alpha)
    concentrations (kappa) : array, [n_centers, ]
    """
    n_examples, n_features = X.shape
    n_clusters, n_examples = posterior.shape
    concentrations = np.zeros((n_clusters,))
    centers = np.zeros((n_clusters, n_features))
    if force_weights is None:
        weights = np.zeros((n_clusters,))
    for cc in range(n_clusters):
        # update weights (alpha)
        if force_weights is None:
            weights[cc] = np.mean(posterior[cc, :])
        else:
            weights = force_weights
        # update centers (mu): posterior-weighted sum of examples
        X_scaled = X.copy()
        if sp.issparse(X):
            # CSR layout: repeat each row's posterior weight over that
            # row's nonzeros (np.diff(indptr) gives nonzeros per row)
            X_scaled.data *= posterior[cc, :].repeat(np.diff(X_scaled.indptr))
        else:
            for ee in range(n_examples):
                X_scaled[ee, :] *= posterior[cc, ee]
        centers[cc, :] = X_scaled.sum(axis=0)
        # normalize centers back onto the unit sphere
        center_norm = np.linalg.norm(centers[cc, :])
        if center_norm > 1e-8:
            centers[cc, :] = centers[cc, :] / center_norm
        # update concentration (kappa) via the Banerjee et al. 2005
        # approximation [TODO: add other kappa approximations]
        rbar = center_norm / (n_examples * weights[cc])
        concentrations[cc] = rbar * n_features - np.power(rbar, 3.)
        if np.abs(rbar - 1.0) < 1e-10:
            # rbar -> 1 makes the denominator below vanish; clamp kappa
            concentrations[cc] = MAX_CONTENTRATION
        else:
            concentrations[cc] /= 1. - np.power(rbar, 2.)
        # let python know we can free this (good for large dense X)
        del X_scaled
    return centers, weights, concentrations
def _movMF(
    X,
    n_clusters,
    posterior_type="soft",
    force_weights=None,
    max_iter=300,
    verbose=False,
    init="random-class",
    random_state=None,
    tol=1e-6,
):
    """Mixture of von Mises Fisher clustering.

    Implements the algorithms (i) and (ii) from
    "Clustering on the Unit Hypersphere using von Mises-Fisher Distributions"
    by Banerjee, Dhillon, Ghosh, and Sra.

    TODO: Currently only supports Banerjee et al 2005 approximation of kappa,
    however, there are numerous other approximations see _update_params.

    NOTE(review): this docstring was copied from `movMF`; the parameters
    n_init, n_jobs and copy_x listed below are not parameters of this
    single-run function.

    Attribution
    ----------
    Approximation of log-vmf distribution function from movMF R-package.
    movMF: An R Package for Fitting Mixtures of von Mises-Fisher Distributions
    by Kurt Hornik, Bettina Grun, 2014

    Find more at:
        https://cran.r-project.org/web/packages/movMF/vignettes/movMF.pdf
        https://cran.r-project.org/web/packages/movMF/index.html

    Parameters
    ----------
    n_clusters : int, optional, default: 8
        The number of clusters to form as well as the number of
        centroids to generate.

    posterior_type: 'soft' or 'hard'
        Type of posterior computed in exepectation step.
        See note about attribute: self.posterior_

    force_weights : None or array [n_clusters, ]
        If None, the algorithm will estimate the weights.
        If an array of weights, algorithm will estimate concentrations and
        centers with given weights.

    max_iter : int, default: 300
        Maximum number of iterations of the k-means algorithm for a
        single run.

    n_init : int, default: 10
        Number of time the k-means algorithm will be run with different
        centroid seeds. The final results will be the best output of
        n_init consecutive runs in terms of inertia.

    init: (string) one of
        random-class [default]: random class assignment & centroid computation
        k-means++ : uses sklearn k-means++ initialization algorithm
        spherical-k-means : use centroids from one pass of spherical k-means
        random : random unit norm vectors
        random-orthonormal : random orthonormal vectors
        If an ndarray is passed, it should be of shape (n_clusters, n_features)
        and gives the initial centers.

    tol : float, default: 1e-6
        Relative tolerance with regards to inertia to declare convergence

    n_jobs : int
        The number of jobs to use for the computation. This works by computing
        each of the n_init runs in parallel.
        If -1 all CPUs are used. If 1 is given, no parallel computing code is
        used at all, which is useful for debugging. For n_jobs below -1,
        (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
        are used.

    random_state : integer or numpy.RandomState, optional
        The generator used to initialize the centers. If an integer is
        given, it fixes the seed. Defaults to the global numpy random
        number generator.

    verbose : int, default 0
        Verbosity mode.

    copy_x : boolean, default True
        When pre-computing distances it is more numerically accurate to center
        the data first. If copy_x is True, then the original data is not
        modified. If False, the original data is modified, and put back before
        the function returns, but small numerical differences may be introduced
        by subtracting and then adding the data mean.
    """
    random_state = check_random_state(random_state)
    n_examples, n_features = np.shape(X)

    # init centers (mus)
    centers = _init_unit_centers(X, n_clusters, random_state, init)

    # init weights (alphas): uniform unless fixed by the caller
    if force_weights is None:
        weights = np.ones((n_clusters,))
        weights = weights / np.sum(weights)
    else:
        weights = force_weights

    # init concentrations (kappas)
    concentrations = np.ones((n_clusters,))

    if verbose:
        print("Initialization complete")

    for iter in range(max_iter):  # NOTE(review): `iter` shadows the builtin
        centers_prev = centers.copy()

        # expectation step
        posterior = _expectation(
            X, centers, weights, concentrations, posterior_type=posterior_type
        )

        # maximization step
        centers, weights, concentrations = _maximization(
            X, posterior, force_weights=force_weights
        )

        # check convergence: squared shift of all centers
        tolcheck = squared_norm(centers_prev - centers)
        if tolcheck <= tol:
            if verbose:
                print(
                    "Converged at iteration %d: "
                    "center shift %e within tolerance %e" % (iter, tolcheck, tol)
                )
            break

    # labels come for free via posterior
    labels = np.zeros((n_examples,))
    for ee in range(n_examples):
        labels[ee] = np.argmax(posterior[:, ee])

    inertia = _inertia_from_labels(X, centers, labels)

    return centers, weights, concentrations, posterior, labels, inertia
def movMF(
    X,
    n_clusters,
    posterior_type="soft",
    force_weights=None,
    n_init=10,
    n_jobs=1,
    max_iter=300,
    verbose=False,
    init="random-class",
    random_state=None,
    tol=1e-6,
    copy_x=True,
):
    """Wrapper for parallelization of _movMF and running n_init times.

    Runs the mixture-of-vMF EM procedure n_init times (serially when
    n_jobs == 1, otherwise via joblib) and keeps the run with the lowest
    inertia.

    Returns
    -------
    (best_centers, best_labels, best_inertia, best_weights,
     best_concentrations, best_posterior)
    """
    if n_init <= 0:
        raise ValueError(
            "Invalid number of initializations."
            " n_init=%d must be bigger than zero." % n_init
        )
    random_state = check_random_state(random_state)

    if max_iter <= 0:
        raise ValueError(
            "Number of iterations should be a positive number,"
            " got %d instead" % max_iter
        )

    X = as_float_array(X, copy=copy_x)
    tol = _tolerance(X, tol)

    if hasattr(init, "__array__"):
        init = check_array(init, dtype=X.dtype.type, copy=True)
        _validate_center_shape(X, n_clusters, init)

        # With explicit initial centers every restart would be identical,
        # so a single init suffices.
        if n_init != 1:
            warnings.warn(
                "Explicit initial center position passed: "
                "performing only one init in k-means instead of n_init=%d" % n_init,
                RuntimeWarning,
                stacklevel=2,
            )
            n_init = 1

    # Best-so-far results across the n_init restarts.
    best_centers = None
    best_labels = None
    best_weights = None
    best_concentrations = None
    best_posterior = None
    best_inertia = None

    if n_jobs == 1:
        # For a single thread, less memory is needed if we just store one set
        # of the best results (as opposed to one set per run per thread).
        for it in range(n_init):
            # cluster on the sphere
            (centers, weights, concentrations, posterior, labels, inertia) = _movMF(
                X,
                n_clusters,
                posterior_type=posterior_type,
                force_weights=force_weights,
                max_iter=max_iter,
                verbose=verbose,
                init=init,
                random_state=random_state,
                tol=tol,
            )

            # determine if these results are the best so far
            if best_inertia is None or inertia < best_inertia:
                best_centers = centers.copy()
                best_labels = labels.copy()
                best_weights = weights.copy()
                best_concentrations = concentrations.copy()
                best_posterior = posterior.copy()
                best_inertia = inertia
    else:
        # Parallelisation of movMF runs: each run gets its own seed.
        seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init)
        results = Parallel(n_jobs=n_jobs, verbose=0)(
            delayed(_movMF)(
                X,
                n_clusters,
                posterior_type=posterior_type,
                force_weights=force_weights,
                max_iter=max_iter,
                verbose=verbose,
                init=init,
                # BUG FIX: pass the per-run seed; previously the shared
                # random_state object was passed and the generated seeds
                # were unused, so parallel runs were not independently seeded.
                random_state=seed,
                tol=tol,
            )
            for seed in seeds
        )

        # Get results with the lowest inertia
        centers, weights, concentrations, posteriors, labels, inertia = zip(*results)
        best = np.argmin(inertia)
        best_labels = labels[best]
        best_inertia = inertia[best]
        best_centers = centers[best]
        best_concentrations = concentrations[best]
        best_posterior = posteriors[best]
        best_weights = weights[best]

    return (
        best_centers,
        best_labels,
        best_inertia,
        best_weights,
        best_concentrations,
        best_posterior,
    )
class VonMisesFisherMixture(BaseEstimator, ClusterMixin, TransformerMixin):
    """Estimator for Mixture of von Mises Fisher clustering on the unit sphere.

    Implements the algorithms (i) and (ii) from

    "Clustering on the Unit Hypersphere using von Mises-Fisher Distributions"
    by Banerjee, Dhillon, Ghosh, and Sra.

    TODO: Currently only supports Banerjee et al 2005 approximation of kappa,
    however, there are numerous other approximations see _update_params.

    Attribution
    ----------
    Approximation of log-vmf distribution function from movMF R-package.

    movMF: An R Package for Fitting Mixtures of von Mises-Fisher Distributions
    by Kurt Hornik, Bettina Grun, 2014

    Find more at:
        https://cran.r-project.org/web/packages/movMF/vignettes/movMF.pdf
        https://cran.r-project.org/web/packages/movMF/index.html

    Basic sklearn scaffolding from sklearn.cluster.KMeans.

    Parameters
    ----------
    n_clusters : int, optional, default: 5
        The number of clusters to form as well as the number of
        centroids to generate.

    posterior_type : 'soft' or 'hard'
        Type of posterior computed in the expectation step.
        See note about attribute: self.posterior_

    force_weights : None or array [n_clusters, ]
        If None, the algorithm will estimate the weights.
        If an array of weights, algorithm will estimate concentrations and
        centers with given weights.

    max_iter : int, default: 300
        Maximum number of iterations of the EM algorithm for a single run.

    n_init : int, default: 10
        Number of times the algorithm will be run with different centroid
        seeds. The final results will be the best output of n_init
        consecutive runs in terms of inertia.

    init : (string) one of
        random-class [default]: random class assignment & centroid computation
        k-means++ : uses sklearn k-means++ initialization algorithm
        spherical-k-means : use centroids from one pass of spherical k-means
        random : random unit norm vectors
        random-orthonormal : random orthonormal vectors
        If an ndarray is passed, it should be of shape (n_clusters, n_features)
        and gives the initial centers.

    tol : float, default: 1e-6
        Relative tolerance with regards to inertia to declare convergence

    n_jobs : int
        The number of jobs to use for the computation. This works by computing
        each of the n_init runs in parallel.
        If -1 all CPUs are used. If 1 is given, no parallel computing code is
        used at all, which is useful for debugging. For n_jobs below -1,
        (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
        are used.

    random_state : integer or numpy.RandomState, optional
        The generator used to initialize the centers. If an integer is
        given, it fixes the seed. Defaults to the global numpy random
        number generator.

    verbose : int, default 0
        Verbosity mode.

    copy_x : boolean, default True
        When pre-computing distances it is more numerically accurate to center
        the data first. If copy_x is True, then the original data is not
        modified. If False, the original data is modified, and put back before
        the function returns, but small numerical differences may be introduced
        by subtracting and then adding the data mean.

    normalize : boolean, default True
        Normalize the input to have unit norm.

    Attributes
    ----------
    cluster_centers_ : array, [n_clusters, n_features]
        Coordinates of cluster centers

    labels_ :
        Labels of each point

    inertia_ : float
        Sum of distances of samples to their closest cluster center.

    weights_ : array, [n_clusters,]
        Weights of each cluster in vMF distribution (alpha).

    concentrations_ : array [n_clusters,]
        Concentration parameter for each cluster (kappa).
        Larger values correspond to more concentrated clusters.

    posterior_ : array, [n_clusters, n_examples]
        Each column corresponds to the posterior distribution for an example.

        If posterior_type='hard' is used, there will only be one non-zero per
        column, its index corresponding to the example's cluster label.

        If posterior_type='soft' is used, this matrix will be dense and the
        column values correspond to soft clustering weights.
    """

    def __init__(
        self,
        n_clusters=5,
        posterior_type="soft",
        force_weights=None,
        n_init=10,
        n_jobs=1,
        max_iter=300,
        verbose=False,
        init="random-class",
        random_state=None,
        tol=1e-6,
        copy_x=True,
        normalize=True,
    ):
        # sklearn convention: hyper-parameters are stored verbatim and
        # validated only at fit time.
        self.n_clusters = n_clusters
        self.posterior_type = posterior_type
        self.force_weights = force_weights
        self.n_init = n_init
        self.n_jobs = n_jobs
        self.max_iter = max_iter
        self.verbose = verbose
        self.init = init
        self.random_state = random_state
        self.tol = tol
        self.copy_x = copy_x
        self.normalize = normalize

    def _check_force_weights(self):
        """Raise ValueError if user-supplied weights don't match n_clusters."""
        if self.force_weights is None:
            return

        if len(self.force_weights) != self.n_clusters:
            raise ValueError(
                (
                    "len(force_weights)={} but must equal "
                    "n_clusters={}".format(len(self.force_weights), self.n_clusters)
                )
            )

    def _check_fit_data(self, X):
        """Verify that the number of samples given is larger than k
        and that every sample has unit l2-norm."""
        X = check_array(X, accept_sparse="csr", dtype=[np.float64, np.float32])
        n_samples, n_features = X.shape
        if X.shape[0] < self.n_clusters:
            raise ValueError(
                "n_samples=%d should be >= n_clusters=%d"
                % (X.shape[0], self.n_clusters)
            )

        # vMF is defined on the unit hypersphere; reject off-sphere samples.
        for ee in range(n_samples):
            if sp.issparse(X):
                n = sp.linalg.norm(X[ee, :])
            else:
                n = np.linalg.norm(X[ee, :])

            if np.abs(n - 1.) > 1e-4:
                raise ValueError("Data l2-norm must be 1, found {}".format(n))

        return X

    def _check_test_data(self, X):
        """Validate prediction-time input: dtype, feature count, unit norms."""
        # NOTE(review): warn_on_dtype was deprecated/removed in newer
        # scikit-learn versions -- confirm the targeted sklearn release.
        X = check_array(X, accept_sparse="csr", dtype=FLOAT_DTYPES, warn_on_dtype=True)
        n_samples, n_features = X.shape
        expected_n_features = self.cluster_centers_.shape[1]
        if not n_features == expected_n_features:
            raise ValueError(
                "Incorrect number of features. "
                "Got %d features, expected %d" % (n_features, expected_n_features)
            )

        for ee in range(n_samples):
            if sp.issparse(X):
                n = sp.linalg.norm(X[ee, :])
            else:
                n = np.linalg.norm(X[ee, :])

            if np.abs(n - 1.) > 1e-4:
                raise ValueError("Data l2-norm must be 1, found {}".format(n))

        return X

    def fit(self, X, y=None):
        """Compute mixture of von Mises Fisher clustering.

        Parameters
        ----------
        X : array-like or sparse matrix, shape=(n_samples, n_features)
        """
        if self.normalize:
            X = normalize(X)

        self._check_force_weights()
        random_state = check_random_state(self.random_state)
        X = self._check_fit_data(X)

        (
            self.cluster_centers_,
            self.labels_,
            self.inertia_,
            self.weights_,
            self.concentrations_,
            self.posterior_,
        ) = movMF(
            X,
            self.n_clusters,
            posterior_type=self.posterior_type,
            force_weights=self.force_weights,
            n_init=self.n_init,
            n_jobs=self.n_jobs,
            max_iter=self.max_iter,
            verbose=self.verbose,
            init=self.init,
            random_state=random_state,
            tol=self.tol,
            copy_x=self.copy_x,
        )

        return self

    def fit_predict(self, X, y=None):
        """Compute cluster centers and predict cluster index for each sample.

        Convenience method; equivalent to calling fit(X) followed by
        predict(X).
        """
        return self.fit(X).labels_

    def fit_transform(self, X, y=None):
        """Compute clustering and transform X to cluster-distance space.

        Equivalent to fit(X).transform(X), but more efficiently implemented.
        """
        # Currently, this just skips a copy of the data if it is not in
        # np.array or CSR format already.
        # XXX This skips _check_test_data, which may change the dtype;
        # we should refactor the input validation.
        return self.fit(X)._transform(X)

    def transform(self, X, y=None):
        """Transform X to a cluster-distance space.

        In the new space, each dimension is the cosine distance to the cluster
        centers. Note that even if X is sparse, the array returned by
        `transform` will typically be dense.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            New data to transform.

        Returns
        -------
        X_new : array, shape [n_samples, k]
            X transformed in the new space.
        """
        if self.normalize:
            X = normalize(X)

        check_is_fitted(self, "cluster_centers_")
        X = self._check_test_data(X)
        return self._transform(X)

    def _transform(self, X):
        """guts of transform method; no input validation"""
        return cosine_distances(X, self.cluster_centers_)

    def predict(self, X):
        """Predict the closest cluster each sample in X belongs to.

        In the vector quantization literature, `cluster_centers_` is called
        the code book and each value returned by `predict` is the index of
        the closest code in the code book.

        Note: Does not check that each point is on the sphere.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            New data to predict.

        Returns
        -------
        labels : array, shape [n_samples,]
            Index of the cluster each sample belongs to.
        """
        if self.normalize:
            X = normalize(X)

        check_is_fitted(self, "cluster_centers_")
        X = self._check_test_data(X)
        return _labels_inertia(X, self.cluster_centers_)[0]

    def score(self, X, y=None):
        """Inertia score (sum of all distances to closest cluster).

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            New data.

        Returns
        -------
        score : float
            Larger score is better.
        """
        if self.normalize:
            X = normalize(X)

        check_is_fitted(self, "cluster_centers_")
        X = self._check_test_data(X)
        # Negated so that "larger is better", matching sklearn's convention.
        return -_labels_inertia(X, self.cluster_centers_)[1]

    def log_likelihood(self, X):
        # NOTE(review): _log_likelihood returns the posterior responsibility
        # matrix [n_clusters, n_examples], not a scalar log-likelihood --
        # confirm intended semantics before relying on the name.
        check_is_fitted(self, "cluster_centers_")
        return _log_likelihood(
            X, self.cluster_centers_, self.weights_, self.concentrations_
        )
|
jasonlaska/spherecluster | spherecluster/von_mises_fisher_mixture.py | _maximization | python | def _maximization(X, posterior, force_weights=None):
n_examples, n_features = X.shape
n_clusters, n_examples = posterior.shape
concentrations = np.zeros((n_clusters,))
centers = np.zeros((n_clusters, n_features))
if force_weights is None:
weights = np.zeros((n_clusters,))
for cc in range(n_clusters):
# update weights (alpha)
if force_weights is None:
weights[cc] = np.mean(posterior[cc, :])
else:
weights = force_weights
# update centers (mu)
X_scaled = X.copy()
if sp.issparse(X):
X_scaled.data *= posterior[cc, :].repeat(np.diff(X_scaled.indptr))
else:
for ee in range(n_examples):
X_scaled[ee, :] *= posterior[cc, ee]
centers[cc, :] = X_scaled.sum(axis=0)
# normalize centers
center_norm = np.linalg.norm(centers[cc, :])
if center_norm > 1e-8:
centers[cc, :] = centers[cc, :] / center_norm
# update concentration (kappa) [TODO: add other kappa approximations]
rbar = center_norm / (n_examples * weights[cc])
concentrations[cc] = rbar * n_features - np.power(rbar, 3.)
if np.abs(rbar - 1.0) < 1e-10:
concentrations[cc] = MAX_CONTENTRATION
else:
concentrations[cc] /= 1. - np.power(rbar, 2.)
# let python know we can free this (good for large dense X)
del X_scaled
return centers, weights, concentrations | Estimate new centers, weights, and concentrations from
Parameters
----------
posterior : array, [n_centers, n_examples]
The posterior matrix from the expectation step.
force_weights : None or array, [n_centers, ]
If None is passed, will estimate weights.
If an array is passed, will use instead of estimating.
Returns
----------
centers (mu) : array, [n_centers x n_features]
weights (alpha) : array, [n_centers, ] (alpha)
concentrations (kappa) : array, [n_centers, ] | train | https://github.com/jasonlaska/spherecluster/blob/701b0b1909088a56e353b363b2672580d4fe9d93/spherecluster/von_mises_fisher_mixture.py#L296-L354 | null | import warnings
import numpy as np
import scipy.sparse as sp
from scipy.special import iv # modified Bessel function of first kind, I_v
from numpy import i0 # modified Bessel function of first kind order 0, I_0
from scipy.special import logsumexp
from sklearn.base import BaseEstimator, ClusterMixin, TransformerMixin
from sklearn.cluster.k_means_ import _init_centroids, _tolerance, _validate_center_shape
from sklearn.utils.validation import FLOAT_DTYPES
from sklearn.utils.validation import check_is_fitted
from sklearn.utils import check_array, check_random_state, as_float_array
from sklearn.preprocessing import normalize
from sklearn.utils.extmath import squared_norm
from sklearn.metrics.pairwise import cosine_distances
from sklearn.externals.joblib import Parallel, delayed
from . import spherical_kmeans
MAX_CONTENTRATION = 1e10
def _inertia_from_labels(X, centers, labels):
"""Compute inertia with cosine distance using known labels.
"""
n_examples, n_features = X.shape
inertia = np.zeros((n_examples,))
for ee in range(n_examples):
inertia[ee] = 1 - X[ee, :].dot(centers[int(labels[ee]), :].T)
return np.sum(inertia)
def _labels_inertia(X, centers):
"""Compute labels and inertia with cosine distance.
"""
n_examples, n_features = X.shape
n_clusters, n_features = centers.shape
labels = np.zeros((n_examples,))
inertia = np.zeros((n_examples,))
for ee in range(n_examples):
dists = np.zeros((n_clusters,))
for cc in range(n_clusters):
dists[cc] = 1 - X[ee, :].dot(centers[cc, :].T)
labels[ee] = np.argmin(dists)
inertia[ee] = dists[int(labels[ee])]
return labels, np.sum(inertia)
def _vmf_log(X, kappa, mu):
    """Compute log(vMF(X, kappa, mu)) using built-in numpy/scipy Bessel
    approximations.

    Works well on small kappa and mu.

    Parameters
    ----------
    X : array, [n_examples, n_features]
        Unit-norm observations.
    kappa : float
        Concentration parameter.
    mu : array, [n_features,]
        Unit-norm mean direction.

    Returns
    -------
    array of per-example log-densities.
    """
    n_examples, n_features = X.shape
    # Direct evaluation of log(C(kappa) * exp(kappa * <x, mu>)); np.exp
    # overflows for large kappa, which is why callers switch to
    # _vmf_log_asymptotic when n_features is large.
    return np.log(_vmf_normalize(kappa, n_features) * np.exp(kappa * X.dot(mu).T))
def _vmf_normalize(kappa, dim):
"""Compute normalization constant using built-in numpy/scipy Bessel
approximations.
Works well on small kappa and mu.
"""
num = np.power(kappa, dim / 2. - 1.)
if dim / 2. - 1. < 1e-15:
denom = np.power(2. * np.pi, dim / 2.) * i0(kappa)
else:
denom = np.power(2. * np.pi, dim / 2.) * iv(dim / 2. - 1., kappa)
if np.isinf(num):
raise ValueError("VMF scaling numerator was inf.")
if np.isinf(denom):
raise ValueError("VMF scaling denominator was inf.")
if np.abs(denom) < 1e-15:
raise ValueError("VMF scaling denominator was 0.")
return num / denom
def _log_H_asymptotic(nu, kappa):
    r"""Compute the Amos-type upper bound asymptotic approximation on H where
    log(H_\nu)(\kappa) = \int_0^\kappa R_\nu(t) dt.

    See "lH_asymptotic <-" in movMF.R and utility function implementation notes
    from https://cran.r-project.org/web/packages/movMF/index.html
    """
    # |nu + 0.5|, written via sqrt of the square.
    beta = np.sqrt((nu + 0.5) ** 2)
    # Clipped kappa for the correction term, per the movMF reference code.
    kappa_l = np.min([kappa, np.sqrt((3. * nu + 11. / 2.) * (nu + 3. / 2.))])
    return _S(kappa, nu + 0.5, beta) + (
        _S(kappa_l, nu, nu + 2.) - _S(kappa_l, nu + 0.5, beta)
    )
def _S(kappa, alpha, beta):
"""Compute the antiderivative of the Amos-type bound G on the modified
Bessel function ratio.
Note: Handles scalar kappa, alpha, and beta only.
See "S <-" in movMF.R and utility function implementation notes from
https://cran.r-project.org/web/packages/movMF/index.html
"""
kappa = 1. * np.abs(kappa)
alpha = 1. * alpha
beta = 1. * np.abs(beta)
a_plus_b = alpha + beta
u = np.sqrt(kappa ** 2 + beta ** 2)
if alpha == 0:
alpha_scale = 0
else:
alpha_scale = alpha * np.log((alpha + u) / a_plus_b)
return u - beta - alpha_scale
def _vmf_log_asymptotic(X, kappa, mu):
    r"""Compute log(f(x|theta)) via Amos approximation

    log(f(x|theta)) = theta' x - log(H_{d/2-1})(\|theta\|)

    where theta = kappa * X, \|theta\| = kappa.

    More numerically stable than _vmf_log for large values of kappa and
    n_features, since no Bessel function is evaluated directly.

    See utility function implementation notes in movMF.R from
    https://cran.r-project.org/web/packages/movMF/index.html
    """
    n_examples, n_features = X.shape
    # theta' x minus the bound-based log normalizer.
    log_vfm = kappa * X.dot(mu).T + -_log_H_asymptotic(n_features / 2. - 1., kappa)

    return log_vfm
def _log_likelihood(X, centers, weights, concentrations):
    """Posterior responsibilities of each cluster for each example.

    NOTE(review): despite its name, this returns the column-normalized
    posterior matrix [n_clusters, n_examples] (same computation as
    _expectation with posterior_type='soft'), not a scalar log-likelihood
    -- confirm intended semantics.
    """
    if len(np.shape(X)) != 2:
        # Promote a single example to a 1 x n_features matrix.
        X = X.reshape((1, len(X)))

    n_examples, n_features = np.shape(X)
    n_clusters, _ = centers.shape

    # Direct Bessel evaluation is numerically unstable in high dimension.
    if n_features <= 50:  # works up to about 50 before numerically unstable
        vmf_f = _vmf_log
    else:
        vmf_f = _vmf_log_asymptotic

    # Per-cluster log-densities for every example.
    f_log = np.zeros((n_clusters, n_examples))
    for cc in range(n_clusters):
        f_log[cc, :] = vmf_f(X, concentrations[cc], centers[cc, :])

    posterior = np.zeros((n_clusters, n_examples))
    weights_log = np.log(weights)
    # log prior + log likelihood, normalized per column in log-space.
    posterior = np.tile(weights_log.T, (n_examples, 1)).T + f_log
    for ee in range(n_examples):
        posterior[:, ee] = np.exp(posterior[:, ee] - logsumexp(posterior[:, ee]))

    return posterior
def _init_unit_centers(X, n_clusters, random_state, init):
"""Initializes unit norm centers.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
n_clusters : int, optional, default: 8
The number of clusters to form as well as the number of
centroids to generate.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
init: (string) one of
k-means++ : uses sklearn k-means++ initialization algorithm
spherical-k-means : use centroids from one pass of spherical k-means
random : random unit norm vectors
random-orthonormal : random orthonormal vectors
If an ndarray is passed, it should be of shape (n_clusters, n_features)
and gives the initial centers.
"""
n_examples, n_features = np.shape(X)
if isinstance(init, np.ndarray):
n_init_clusters, n_init_features = init.shape
assert n_init_clusters == n_clusters
assert n_init_features == n_features
# ensure unit normed centers
centers = init
for cc in range(n_clusters):
centers[cc, :] = centers[cc, :] / np.linalg.norm(centers[cc, :])
return centers
elif init == "spherical-k-means":
labels, inertia, centers, iters = spherical_kmeans._spherical_kmeans_single_lloyd(
X, n_clusters, x_squared_norms=np.ones((n_examples,)), init="k-means++"
)
return centers
elif init == "random":
centers = np.random.randn(n_clusters, n_features)
for cc in range(n_clusters):
centers[cc, :] = centers[cc, :] / np.linalg.norm(centers[cc, :])
return centers
elif init == "k-means++":
centers = _init_centroids(
X,
n_clusters,
"k-means++",
random_state=random_state,
x_squared_norms=np.ones((n_examples,)),
)
for cc in range(n_clusters):
centers[cc, :] = centers[cc, :] / np.linalg.norm(centers[cc, :])
return centers
elif init == "random-orthonormal":
centers = np.random.randn(n_clusters, n_features)
q, r = np.linalg.qr(centers.T, mode="reduced")
return q.T
elif init == "random-class":
centers = np.zeros((n_clusters, n_features))
for cc in range(n_clusters):
while np.linalg.norm(centers[cc, :]) == 0:
labels = np.random.randint(0, n_clusters, n_examples)
centers[cc, :] = X[labels == cc, :].sum(axis=0)
for cc in range(n_clusters):
centers[cc, :] = centers[cc, :] / np.linalg.norm(centers[cc, :])
return centers
def _expectation(X, centers, weights, concentrations, posterior_type="soft"):
    """Compute the log-likelihood of each datapoint being in each cluster.

    Parameters
    ----------
    centers (mu) : array, [n_centers x n_features]
    weights (alpha) : array, [n_centers, ] (alpha)
    concentrations (kappa) : array, [n_centers, ]

    Returns
    ----------
    posterior : array, [n_centers, n_examples]
    """
    n_examples, n_features = np.shape(X)
    n_clusters, _ = centers.shape

    # Switch to the asymptotic form where direct Bessel evaluation would
    # be numerically unstable.
    if n_features <= 50:  # works up to about 50 before numerically unstable
        vmf_f = _vmf_log
    else:
        vmf_f = _vmf_log_asymptotic

    # Per-cluster log-densities for every example.
    f_log = np.zeros((n_clusters, n_examples))
    for cc in range(n_clusters):
        f_log[cc, :] = vmf_f(X, concentrations[cc], centers[cc, :])

    posterior = np.zeros((n_clusters, n_examples))
    if posterior_type == "soft":
        weights_log = np.log(weights)
        # log prior + log likelihood, normalized per example in log-space.
        posterior = np.tile(weights_log.T, (n_examples, 1)).T + f_log
        for ee in range(n_examples):
            posterior[:, ee] = np.exp(posterior[:, ee] - logsumexp(posterior[:, ee]))

    elif posterior_type == "hard":
        weights_log = np.log(weights)
        weighted_f_log = np.tile(weights_log.T, (n_examples, 1)).T + f_log
        # One-hot assignment to the highest weighted log-density.
        for ee in range(n_examples):
            posterior[np.argmax(weighted_f_log[:, ee]), ee] = 1.0

    return posterior
def _movMF(
    X,
    n_clusters,
    posterior_type="soft",
    force_weights=None,
    max_iter=300,
    verbose=False,
    init="random-class",
    random_state=None,
    tol=1e-6,
):
    """Single EM run of mixture of von Mises Fisher clustering.

    Implements the algorithms (i) and (ii) from
    "Clustering on the Unit Hypersphere using von Mises-Fisher Distributions"
    by Banerjee, Dhillon, Ghosh, and Sra, with kappa approximated per
    Banerjee et al 2005.

    Attribution
    ----------
    Approximation of log-vmf distribution function from movMF R-package
    (Hornik & Grun, 2014):
    https://cran.r-project.org/web/packages/movMF/index.html

    Parameters
    ----------
    X : array-like or sparse matrix, shape=(n_samples, n_features)
        Unit-norm observations.
    n_clusters : int
        Number of mixture components.
    posterior_type : 'soft' or 'hard'
        Type of posterior computed in the expectation step.
    force_weights : None or array [n_clusters, ]
        If None, weights are estimated; otherwise used as given.
    max_iter : int
        Maximum number of EM iterations.
    verbose : bool or int
        Verbosity mode.
    init : string or ndarray
        Center initialization scheme; see _init_unit_centers.
    random_state : integer or numpy.RandomState, optional
        Seed / generator used to initialize the centers.
    tol : float
        Convergence tolerance on the squared center shift.

    Returns
    -------
    (centers, weights, concentrations, posterior, labels, inertia)
    """
    random_state = check_random_state(random_state)
    n_examples, n_features = np.shape(X)

    # init centers (mus)
    centers = _init_unit_centers(X, n_clusters, random_state, init)

    # init weights (alphas): uniform unless forced
    if force_weights is None:
        weights = np.ones((n_clusters,))
        weights = weights / np.sum(weights)
    else:
        weights = force_weights

    # init concentrations (kappas)
    concentrations = np.ones((n_clusters,))

    if verbose:
        print("Initialization complete")

    for iter in range(max_iter):  # NOTE: shadows the builtin `iter`
        centers_prev = centers.copy()

        # expectation step
        posterior = _expectation(
            X, centers, weights, concentrations, posterior_type=posterior_type
        )

        # maximization step
        centers, weights, concentrations = _maximization(
            X, posterior, force_weights=force_weights
        )

        # check convergence: squared Frobenius norm of the center shift
        tolcheck = squared_norm(centers_prev - centers)
        if tolcheck <= tol:
            if verbose:
                print(
                    "Converged at iteration %d: "
                    "center shift %e within tolerance %e" % (iter, tolcheck, tol)
                )
            break

    # labels come for free via posterior
    labels = np.zeros((n_examples,))
    for ee in range(n_examples):
        labels[ee] = np.argmax(posterior[:, ee])

    inertia = _inertia_from_labels(X, centers, labels)

    return centers, weights, concentrations, posterior, labels, inertia
def movMF(
    X,
    n_clusters,
    posterior_type="soft",
    force_weights=None,
    n_init=10,
    n_jobs=1,
    max_iter=300,
    verbose=False,
    init="random-class",
    random_state=None,
    tol=1e-6,
    copy_x=True,
):
    """Wrapper for parallelization of _movMF and running n_init times.

    Runs the mixture-of-vMF EM procedure n_init times (serially when
    n_jobs == 1, otherwise via joblib) and keeps the run with the lowest
    inertia.

    Returns
    -------
    (best_centers, best_labels, best_inertia, best_weights,
     best_concentrations, best_posterior)
    """
    if n_init <= 0:
        raise ValueError(
            "Invalid number of initializations."
            " n_init=%d must be bigger than zero." % n_init
        )
    random_state = check_random_state(random_state)

    if max_iter <= 0:
        raise ValueError(
            "Number of iterations should be a positive number,"
            " got %d instead" % max_iter
        )

    X = as_float_array(X, copy=copy_x)
    tol = _tolerance(X, tol)

    if hasattr(init, "__array__"):
        init = check_array(init, dtype=X.dtype.type, copy=True)
        _validate_center_shape(X, n_clusters, init)

        # With explicit initial centers every restart would be identical,
        # so a single init suffices.
        if n_init != 1:
            warnings.warn(
                "Explicit initial center position passed: "
                "performing only one init in k-means instead of n_init=%d" % n_init,
                RuntimeWarning,
                stacklevel=2,
            )
            n_init = 1

    # Best-so-far results across the n_init restarts.
    best_centers = None
    best_labels = None
    best_weights = None
    best_concentrations = None
    best_posterior = None
    best_inertia = None

    if n_jobs == 1:
        # For a single thread, less memory is needed if we just store one set
        # of the best results (as opposed to one set per run per thread).
        for it in range(n_init):
            # cluster on the sphere
            (centers, weights, concentrations, posterior, labels, inertia) = _movMF(
                X,
                n_clusters,
                posterior_type=posterior_type,
                force_weights=force_weights,
                max_iter=max_iter,
                verbose=verbose,
                init=init,
                random_state=random_state,
                tol=tol,
            )

            # determine if these results are the best so far
            if best_inertia is None or inertia < best_inertia:
                best_centers = centers.copy()
                best_labels = labels.copy()
                best_weights = weights.copy()
                best_concentrations = concentrations.copy()
                best_posterior = posterior.copy()
                best_inertia = inertia
    else:
        # Parallelisation of movMF runs: each run gets its own seed.
        seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init)
        results = Parallel(n_jobs=n_jobs, verbose=0)(
            delayed(_movMF)(
                X,
                n_clusters,
                posterior_type=posterior_type,
                force_weights=force_weights,
                max_iter=max_iter,
                verbose=verbose,
                init=init,
                # BUG FIX: pass the per-run seed; previously the shared
                # random_state object was passed and the generated seeds
                # were unused, so parallel runs were not independently seeded.
                random_state=seed,
                tol=tol,
            )
            for seed in seeds
        )

        # Get results with the lowest inertia
        centers, weights, concentrations, posteriors, labels, inertia = zip(*results)
        best = np.argmin(inertia)
        best_labels = labels[best]
        best_inertia = inertia[best]
        best_centers = centers[best]
        best_concentrations = concentrations[best]
        best_posterior = posteriors[best]
        best_weights = weights[best]

    return (
        best_centers,
        best_labels,
        best_inertia,
        best_weights,
        best_concentrations,
        best_posterior,
    )
class VonMisesFisherMixture(BaseEstimator, ClusterMixin, TransformerMixin):
"""Estimator for Mixture of von Mises Fisher clustering on the unit sphere.
Implements the algorithms (i) and (ii) from
"Clustering on the Unit Hypersphere using von Mises-Fisher Distributions"
by Banerjee, Dhillon, Ghosh, and Sra.
TODO: Currently only supports Banerjee et al 2005 approximation of kappa,
however, there are numerous other approximations see _update_params.
Attribution
----------
Approximation of log-vmf distribution function from movMF R-package.
movMF: An R Package for Fitting Mixtures of von Mises-Fisher Distributions
by Kurt Hornik, Bettina Grun, 2014
Find more at:
https://cran.r-project.org/web/packages/movMF/vignettes/movMF.pdf
https://cran.r-project.org/web/packages/movMF/index.html
Basic sklearn scaffolding from sklearn.cluster.KMeans.
Parameters
----------
n_clusters : int, optional, default: 8
The number of clusters to form as well as the number of
centroids to generate.
posterior_type: 'soft' or 'hard'
Type of posterior computed in exepectation step.
See note about attribute: self.posterior_
force_weights : None or array [n_clusters, ]
If None, the algorithm will estimate the weights.
If an array of weights, algorithm will estimate concentrations and
centers with given weights.
max_iter : int, default: 300
Maximum number of iterations of the k-means algorithm for a
single run.
n_init : int, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
init: (string) one of
random-class [default]: random class assignment & centroid computation
k-means++ : uses sklearn k-means++ initialization algorithm
spherical-k-means : use centroids from one pass of spherical k-means
random : random unit norm vectors
random-orthonormal : random orthonormal vectors
If an ndarray is passed, it should be of shape (n_clusters, n_features)
and gives the initial centers.
tol : float, default: 1e-6
Relative tolerance with regards to inertia to declare convergence
n_jobs : int
The number of jobs to use for the computation. This works by computing
each of the n_init runs in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
verbose : int, default 0
Verbosity mode.
copy_x : boolean, default True
When pre-computing distances it is more numerically accurate to center
the data first. If copy_x is True, then the original data is not
modified. If False, the original data is modified, and put back before
the function returns, but small numerical differences may be introduced
by subtracting and then adding the data mean.
normalize : boolean, default True
Normalize the input to have unnit norm.
Attributes
----------
cluster_centers_ : array, [n_clusters, n_features]
Coordinates of cluster centers
labels_ :
Labels of each point
inertia_ : float
Sum of distances of samples to their closest cluster center.
weights_ : array, [n_clusters,]
Weights of each cluster in vMF distribution (alpha).
concentrations_ : array [n_clusters,]
Concentration parameter for each cluster (kappa).
Larger values correspond to more concentrated clusters.
posterior_ : array, [n_clusters, n_examples]
Each column corresponds to the posterio distribution for and example.
If posterior_type='hard' is used, there will only be one non-zero per
column, its index corresponding to the example's cluster label.
If posterior_type='soft' is used, this matrix will be dense and the
column values correspond to soft clustering weights.
"""
def __init__(
self,
n_clusters=5,
posterior_type="soft",
force_weights=None,
n_init=10,
n_jobs=1,
max_iter=300,
verbose=False,
init="random-class",
random_state=None,
tol=1e-6,
copy_x=True,
normalize=True,
):
self.n_clusters = n_clusters
self.posterior_type = posterior_type
self.force_weights = force_weights
self.n_init = n_init
self.n_jobs = n_jobs
self.max_iter = max_iter
self.verbose = verbose
self.init = init
self.random_state = random_state
self.tol = tol
self.copy_x = copy_x
self.normalize = normalize
def _check_force_weights(self):
if self.force_weights is None:
return
if len(self.force_weights) != self.n_clusters:
raise ValueError(
(
"len(force_weights)={} but must equal "
"n_clusters={}".format(len(self.force_weights), self.n_clusters)
)
)
def _check_fit_data(self, X):
"""Verify that the number of samples given is larger than k"""
X = check_array(X, accept_sparse="csr", dtype=[np.float64, np.float32])
n_samples, n_features = X.shape
if X.shape[0] < self.n_clusters:
raise ValueError(
"n_samples=%d should be >= n_clusters=%d"
% (X.shape[0], self.n_clusters)
)
for ee in range(n_samples):
if sp.issparse(X):
n = sp.linalg.norm(X[ee, :])
else:
n = np.linalg.norm(X[ee, :])
if np.abs(n - 1.) > 1e-4:
raise ValueError("Data l2-norm must be 1, found {}".format(n))
return X
def _check_test_data(self, X):
X = check_array(X, accept_sparse="csr", dtype=FLOAT_DTYPES, warn_on_dtype=True)
n_samples, n_features = X.shape
expected_n_features = self.cluster_centers_.shape[1]
if not n_features == expected_n_features:
raise ValueError(
"Incorrect number of features. "
"Got %d features, expected %d" % (n_features, expected_n_features)
)
for ee in range(n_samples):
if sp.issparse(X):
n = sp.linalg.norm(X[ee, :])
else:
n = np.linalg.norm(X[ee, :])
if np.abs(n - 1.) > 1e-4:
raise ValueError("Data l2-norm must be 1, found {}".format(n))
return X
def fit(self, X, y=None):
"""Compute mixture of von Mises Fisher clustering.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
"""
if self.normalize:
X = normalize(X)
self._check_force_weights()
random_state = check_random_state(self.random_state)
X = self._check_fit_data(X)
(
self.cluster_centers_,
self.labels_,
self.inertia_,
self.weights_,
self.concentrations_,
self.posterior_,
) = movMF(
X,
self.n_clusters,
posterior_type=self.posterior_type,
force_weights=self.force_weights,
n_init=self.n_init,
n_jobs=self.n_jobs,
max_iter=self.max_iter,
verbose=self.verbose,
init=self.init,
random_state=random_state,
tol=self.tol,
copy_x=self.copy_x,
)
return self
def fit_predict(self, X, y=None):
"""Compute cluster centers and predict cluster index for each sample.
Convenience method; equivalent to calling fit(X) followed by
predict(X).
"""
return self.fit(X).labels_
def fit_transform(self, X, y=None):
"""Compute clustering and transform X to cluster-distance space.
Equivalent to fit(X).transform(X), but more efficiently implemented.
"""
# Currently, this just skips a copy of the data if it is not in
# np.array or CSR format already.
# XXX This skips _check_test_data, which may change the dtype;
# we should refactor the input validation.
return self.fit(X)._transform(X)
def transform(self, X, y=None):
"""Transform X to a cluster-distance space.
In the new space, each dimension is the cosine distance to the cluster
centers. Note that even if X is sparse, the array returned by
`transform` will typically be dense.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data to transform.
Returns
-------
X_new : array, shape [n_samples, k]
X transformed in the new space.
"""
if self.normalize:
X = normalize(X)
check_is_fitted(self, "cluster_centers_")
X = self._check_test_data(X)
return self._transform(X)
def _transform(self, X):
"""guts of transform method; no input validation"""
return cosine_distances(X, self.cluster_centers_)
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
In the vector quantization literature, `cluster_centers_` is called
the code book and each value returned by `predict` is the index of
the closest code in the code book.
Note: Does not check that each point is on the sphere.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data to predict.
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
"""
if self.normalize:
X = normalize(X)
check_is_fitted(self, "cluster_centers_")
X = self._check_test_data(X)
return _labels_inertia(X, self.cluster_centers_)[0]
def score(self, X, y=None):
"""Inertia score (sum of all distances to closest cluster).
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data.
Returns
-------
score : float
Larger score is better.
"""
if self.normalize:
X = normalize(X)
check_is_fitted(self, "cluster_centers_")
X = self._check_test_data(X)
return -_labels_inertia(X, self.cluster_centers_)[1]
def log_likelihood(self, X):
check_is_fitted(self, "cluster_centers_")
return _log_likelihood(
X, self.cluster_centers_, self.weights_, self.concentrations_
)
|
jasonlaska/spherecluster | spherecluster/von_mises_fisher_mixture.py | _movMF | python | def _movMF(
X,
n_clusters,
posterior_type="soft",
force_weights=None,
max_iter=300,
verbose=False,
init="random-class",
random_state=None,
tol=1e-6,
):
random_state = check_random_state(random_state)
n_examples, n_features = np.shape(X)
# init centers (mus)
centers = _init_unit_centers(X, n_clusters, random_state, init)
# init weights (alphas)
if force_weights is None:
weights = np.ones((n_clusters,))
weights = weights / np.sum(weights)
else:
weights = force_weights
# init concentrations (kappas)
concentrations = np.ones((n_clusters,))
if verbose:
print("Initialization complete")
for iter in range(max_iter):
centers_prev = centers.copy()
# expectation step
posterior = _expectation(
X, centers, weights, concentrations, posterior_type=posterior_type
)
# maximization step
centers, weights, concentrations = _maximization(
X, posterior, force_weights=force_weights
)
# check convergence
tolcheck = squared_norm(centers_prev - centers)
if tolcheck <= tol:
if verbose:
print(
"Converged at iteration %d: "
"center shift %e within tolerance %e" % (iter, tolcheck, tol)
)
break
# labels come for free via posterior
labels = np.zeros((n_examples,))
for ee in range(n_examples):
labels[ee] = np.argmax(posterior[:, ee])
inertia = _inertia_from_labels(X, centers, labels)
return centers, weights, concentrations, posterior, labels, inertia | Mixture of von Mises Fisher clustering.
Implements the algorithms (i) and (ii) from
"Clustering on the Unit Hypersphere using von Mises-Fisher Distributions"
by Banerjee, Dhillon, Ghosh, and Sra.
TODO: Currently only supports Banerjee et al 2005 approximation of kappa,
however, there are numerous other approximations see _update_params.
Attribution
----------
Approximation of log-vmf distribution function from movMF R-package.
movMF: An R Package for Fitting Mixtures of von Mises-Fisher Distributions
by Kurt Hornik, Bettina Grun, 2014
Find more at:
https://cran.r-project.org/web/packages/movMF/vignettes/movMF.pdf
https://cran.r-project.org/web/packages/movMF/index.html
Parameters
----------
n_clusters : int, optional, default: 8
The number of clusters to form as well as the number of
centroids to generate.
posterior_type: 'soft' or 'hard'
Type of posterior computed in exepectation step.
See note about attribute: self.posterior_
force_weights : None or array [n_clusters, ]
If None, the algorithm will estimate the weights.
If an array of weights, algorithm will estimate concentrations and
centers with given weights.
max_iter : int, default: 300
Maximum number of iterations of the k-means algorithm for a
single run.
n_init : int, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
init: (string) one of
random-class [default]: random class assignment & centroid computation
k-means++ : uses sklearn k-means++ initialization algorithm
spherical-k-means : use centroids from one pass of spherical k-means
random : random unit norm vectors
random-orthonormal : random orthonormal vectors
If an ndarray is passed, it should be of shape (n_clusters, n_features)
and gives the initial centers.
tol : float, default: 1e-6
Relative tolerance with regards to inertia to declare convergence
n_jobs : int
The number of jobs to use for the computation. This works by computing
each of the n_init runs in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
verbose : int, default 0
Verbosity mode.
copy_x : boolean, default True
When pre-computing distances it is more numerically accurate to center
the data first. If copy_x is True, then the original data is not
modified. If False, the original data is modified, and put back before
the function returns, but small numerical differences may be introduced
by subtracting and then adding the data mean. | train | https://github.com/jasonlaska/spherecluster/blob/701b0b1909088a56e353b363b2672580d4fe9d93/spherecluster/von_mises_fisher_mixture.py#L357-L497 | [
"def _maximization(X, posterior, force_weights=None):\n \"\"\"Estimate new centers, weights, and concentrations from\n\n Parameters\n ----------\n posterior : array, [n_centers, n_examples]\n The posterior matrix from the expectation step.\n\n force_weights : None or array, [n_centers, ]\n ... | import warnings
import numpy as np
import scipy.sparse as sp
from scipy.special import iv # modified Bessel function of first kind, I_v
from numpy import i0 # modified Bessel function of first kind order 0, I_0
from scipy.special import logsumexp
from sklearn.base import BaseEstimator, ClusterMixin, TransformerMixin
from sklearn.cluster.k_means_ import _init_centroids, _tolerance, _validate_center_shape
from sklearn.utils.validation import FLOAT_DTYPES
from sklearn.utils.validation import check_is_fitted
from sklearn.utils import check_array, check_random_state, as_float_array
from sklearn.preprocessing import normalize
from sklearn.utils.extmath import squared_norm
from sklearn.metrics.pairwise import cosine_distances
from sklearn.externals.joblib import Parallel, delayed
from . import spherical_kmeans
MAX_CONTENTRATION = 1e10
def _inertia_from_labels(X, centers, labels):
"""Compute inertia with cosine distance using known labels.
"""
n_examples, n_features = X.shape
inertia = np.zeros((n_examples,))
for ee in range(n_examples):
inertia[ee] = 1 - X[ee, :].dot(centers[int(labels[ee]), :].T)
return np.sum(inertia)
def _labels_inertia(X, centers):
"""Compute labels and inertia with cosine distance.
"""
n_examples, n_features = X.shape
n_clusters, n_features = centers.shape
labels = np.zeros((n_examples,))
inertia = np.zeros((n_examples,))
for ee in range(n_examples):
dists = np.zeros((n_clusters,))
for cc in range(n_clusters):
dists[cc] = 1 - X[ee, :].dot(centers[cc, :].T)
labels[ee] = np.argmin(dists)
inertia[ee] = dists[int(labels[ee])]
return labels, np.sum(inertia)
def _vmf_log(X, kappa, mu):
    """Compute log(vMF(X; kappa, mu)) using built-in numpy/scipy Bessel
    approximations.

    Works well on small kappa and mu; for large kappa / n_features use
    ``_vmf_log_asymptotic``.
    """
    n_examples, n_features = X.shape
    # Computed as log(C) + kappa * <x, mu> instead of the original
    # log(C * exp(kappa * <x, mu>)), which overflows exp() for moderately
    # large kappa even though the final log value is representable.
    return np.log(_vmf_normalize(kappa, n_features)) + kappa * X.dot(mu).T
def _vmf_normalize(kappa, dim):
"""Compute normalization constant using built-in numpy/scipy Bessel
approximations.
Works well on small kappa and mu.
"""
num = np.power(kappa, dim / 2. - 1.)
if dim / 2. - 1. < 1e-15:
denom = np.power(2. * np.pi, dim / 2.) * i0(kappa)
else:
denom = np.power(2. * np.pi, dim / 2.) * iv(dim / 2. - 1., kappa)
if np.isinf(num):
raise ValueError("VMF scaling numerator was inf.")
if np.isinf(denom):
raise ValueError("VMF scaling denominator was inf.")
if np.abs(denom) < 1e-15:
raise ValueError("VMF scaling denominator was 0.")
return num / denom
def _log_H_asymptotic(nu, kappa):
    r"""Compute the Amos-type upper bound asymptotic approximation on H where
    log(H_\nu)(\kappa) = \int_0^\kappa R_\nu(t) dt.

    See "lH_asymptotic <-" in movMF.R and utility function implementation notes
    from https://cran.r-project.org/web/packages/movMF/index.html
    """
    # beta = |nu + 0.5|; written as sqrt((nu + 0.5)^2) to mirror movMF.R.
    beta = np.sqrt((nu + 0.5) ** 2)
    # Evaluate the correction term at min(kappa, cutoff), per the R source.
    kappa_l = np.min([kappa, np.sqrt((3. * nu + 11. / 2.) * (nu + 3. / 2.))])
    return _S(kappa, nu + 0.5, beta) + (
        _S(kappa_l, nu, nu + 2.) - _S(kappa_l, nu + 0.5, beta)
    )
def _S(kappa, alpha, beta):
"""Compute the antiderivative of the Amos-type bound G on the modified
Bessel function ratio.
Note: Handles scalar kappa, alpha, and beta only.
See "S <-" in movMF.R and utility function implementation notes from
https://cran.r-project.org/web/packages/movMF/index.html
"""
kappa = 1. * np.abs(kappa)
alpha = 1. * alpha
beta = 1. * np.abs(beta)
a_plus_b = alpha + beta
u = np.sqrt(kappa ** 2 + beta ** 2)
if alpha == 0:
alpha_scale = 0
else:
alpha_scale = alpha * np.log((alpha + u) / a_plus_b)
return u - beta - alpha_scale
def _vmf_log_asymptotic(X, kappa, mu):
    r"""Compute log(f(x|theta)) via the Amos approximation

        log(f(x|theta)) = theta' x - log(H_{d/2-1})(\|theta\|)

    where theta = kappa * mu and \|theta\| = kappa.

    Using this instead of ``_vmf_log`` helps with numerical stability /
    loss of precision for large values of kappa and n_features.

    See utility function implementation notes in movMF.R from
    https://cran.r-project.org/web/packages/movMF/index.html
    """
    _, n_features = X.shape
    return kappa * X.dot(mu).T - _log_H_asymptotic(n_features / 2. - 1., kappa)
def _log_likelihood(X, centers, weights, concentrations):
    """Posterior cluster responsibilities of X under the fitted mixture.

    NOTE(review): despite its name, this returns the normalized posterior
    matrix [n_clusters, n_examples] — not a scalar log-likelihood. It
    mirrors the 'soft' branch of ``_expectation``.
    """
    # Accept a single example passed as a 1-D vector.
    if len(np.shape(X)) != 2:
        X = X.reshape((1, len(X)))

    n_examples, n_features = np.shape(X)
    n_clusters, _ = centers.shape

    if n_features <= 50:  # works up to about 50 before numerically unstable
        vmf_f = _vmf_log
    else:
        vmf_f = _vmf_log_asymptotic

    # Per-cluster log-density of every example.
    f_log = np.zeros((n_clusters, n_examples))
    for cc in range(n_clusters):
        f_log[cc, :] = vmf_f(X, concentrations[cc], centers[cc, :])

    posterior = np.zeros((n_clusters, n_examples))
    weights_log = np.log(weights)
    posterior = np.tile(weights_log.T, (n_examples, 1)).T + f_log
    # Normalize each column in log-space (logsumexp) for numerical stability.
    for ee in range(n_examples):
        posterior[:, ee] = np.exp(posterior[:, ee] - logsumexp(posterior[:, ee]))

    return posterior
def _init_unit_centers(X, n_clusters, random_state, init):
"""Initializes unit norm centers.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
n_clusters : int, optional, default: 8
The number of clusters to form as well as the number of
centroids to generate.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
init: (string) one of
k-means++ : uses sklearn k-means++ initialization algorithm
spherical-k-means : use centroids from one pass of spherical k-means
random : random unit norm vectors
random-orthonormal : random orthonormal vectors
If an ndarray is passed, it should be of shape (n_clusters, n_features)
and gives the initial centers.
"""
n_examples, n_features = np.shape(X)
if isinstance(init, np.ndarray):
n_init_clusters, n_init_features = init.shape
assert n_init_clusters == n_clusters
assert n_init_features == n_features
# ensure unit normed centers
centers = init
for cc in range(n_clusters):
centers[cc, :] = centers[cc, :] / np.linalg.norm(centers[cc, :])
return centers
elif init == "spherical-k-means":
labels, inertia, centers, iters = spherical_kmeans._spherical_kmeans_single_lloyd(
X, n_clusters, x_squared_norms=np.ones((n_examples,)), init="k-means++"
)
return centers
elif init == "random":
centers = np.random.randn(n_clusters, n_features)
for cc in range(n_clusters):
centers[cc, :] = centers[cc, :] / np.linalg.norm(centers[cc, :])
return centers
elif init == "k-means++":
centers = _init_centroids(
X,
n_clusters,
"k-means++",
random_state=random_state,
x_squared_norms=np.ones((n_examples,)),
)
for cc in range(n_clusters):
centers[cc, :] = centers[cc, :] / np.linalg.norm(centers[cc, :])
return centers
elif init == "random-orthonormal":
centers = np.random.randn(n_clusters, n_features)
q, r = np.linalg.qr(centers.T, mode="reduced")
return q.T
elif init == "random-class":
centers = np.zeros((n_clusters, n_features))
for cc in range(n_clusters):
while np.linalg.norm(centers[cc, :]) == 0:
labels = np.random.randint(0, n_clusters, n_examples)
centers[cc, :] = X[labels == cc, :].sum(axis=0)
for cc in range(n_clusters):
centers[cc, :] = centers[cc, :] / np.linalg.norm(centers[cc, :])
return centers
def _expectation(X, centers, weights, concentrations, posterior_type="soft"):
    """Compute the log-likelihood of each datapoint being in each cluster.

    Parameters
    ----------
    centers (mu) : array, [n_centers x n_features]
    weights (alpha) : array, [n_centers, ] (alpha)
    concentrations (kappa) : array, [n_centers, ]

    Returns
    ----------
    posterior : array, [n_centers, n_examples]
        'soft': per-column probability distribution over clusters.
        'hard': one-hot columns selecting the most likely cluster.
    """
    n_examples, n_features = np.shape(X)
    n_clusters, _ = centers.shape

    if n_features <= 50:  # works up to about 50 before numerically unstable
        vmf_f = _vmf_log
    else:
        vmf_f = _vmf_log_asymptotic

    # Per-cluster log-density of every example.
    f_log = np.zeros((n_clusters, n_examples))
    for cc in range(n_clusters):
        f_log[cc, :] = vmf_f(X, concentrations[cc], centers[cc, :])

    posterior = np.zeros((n_clusters, n_examples))
    if posterior_type == "soft":
        weights_log = np.log(weights)
        posterior = np.tile(weights_log.T, (n_examples, 1)).T + f_log
        # Normalize each column in log-space (logsumexp) for stability.
        for ee in range(n_examples):
            posterior[:, ee] = np.exp(posterior[:, ee] - logsumexp(posterior[:, ee]))

    elif posterior_type == "hard":
        weights_log = np.log(weights)
        weighted_f_log = np.tile(weights_log.T, (n_examples, 1)).T + f_log
        # One-hot assignment: all probability mass on the argmax cluster.
        for ee in range(n_examples):
            posterior[np.argmax(weighted_f_log[:, ee]), ee] = 1.0

    return posterior
def _maximization(X, posterior, force_weights=None):
    """Estimate new centers, weights, and concentrations from the posterior.

    Parameters
    ----------
    posterior : array, [n_centers, n_examples]
        The posterior matrix from the expectation step.

    force_weights : None or array, [n_centers, ]
        If None is passed, will estimate weights.
        If an array is passed, will use instead of estimating.

    Returns
    ----------
    centers (mu) : array, [n_centers x n_features]
    weights (alpha) : array, [n_centers, ] (alpha)
    concentrations (kappa) : array, [n_centers, ]
    """
    n_examples, n_features = X.shape
    n_clusters, n_examples = posterior.shape
    concentrations = np.zeros((n_clusters,))
    centers = np.zeros((n_clusters, n_features))
    if force_weights is None:
        weights = np.zeros((n_clusters,))

    for cc in range(n_clusters):
        # update weights (alpha)
        if force_weights is None:
            weights[cc] = np.mean(posterior[cc, :])
        else:
            weights = force_weights

        # update centers (mu): responsibility-weighted sum of examples.
        X_scaled = X.copy()
        if sp.issparse(X):
            # Scale each CSR row's nonzeros by its posterior responsibility.
            X_scaled.data *= posterior[cc, :].repeat(np.diff(X_scaled.indptr))
        else:
            for ee in range(n_examples):
                X_scaled[ee, :] *= posterior[cc, ee]

        centers[cc, :] = X_scaled.sum(axis=0)

        # normalize centers
        center_norm = np.linalg.norm(centers[cc, :])
        if center_norm > 1e-8:
            centers[cc, :] = centers[cc, :] / center_norm

        # update concentration (kappa) [TODO: add other kappa approximations]
        # Banerjee et al. (2005): kappa ~= (rbar*d - rbar^3) / (1 - rbar^2).
        rbar = center_norm / (n_examples * weights[cc])
        concentrations[cc] = rbar * n_features - np.power(rbar, 3.)
        if np.abs(rbar - 1.0) < 1e-10:
            # rbar -> 1 makes the denominator vanish; clamp kappa instead.
            concentrations[cc] = MAX_CONTENTRATION
        else:
            concentrations[cc] /= 1. - np.power(rbar, 2.)

        # let python know we can free this (good for large dense X)
        del X_scaled

    return centers, weights, concentrations
def movMF(
    X,
    n_clusters,
    posterior_type="soft",
    force_weights=None,
    n_init=10,
    n_jobs=1,
    max_iter=300,
    verbose=False,
    init="random-class",
    random_state=None,
    tol=1e-6,
    copy_x=True,
):
    """Wrapper for parallelization of _movMF and running n_init times.

    Runs the mixture-of-vMF EM procedure ``n_init`` times (serially when
    ``n_jobs == 1``, otherwise via joblib) and keeps the run with the
    lowest inertia.

    Returns
    -------
    (centers, labels, inertia, weights, concentrations, posterior)
        Results of the best run.

    Raises
    ------
    ValueError
        If ``n_init`` or ``max_iter`` is not positive.
    """
    if n_init <= 0:
        raise ValueError(
            "Invalid number of initializations."
            " n_init=%d must be bigger than zero." % n_init
        )
    random_state = check_random_state(random_state)

    if max_iter <= 0:
        raise ValueError(
            "Number of iterations should be a positive number,"
            " got %d instead" % max_iter
        )

    # (Removed dead `best_inertia = np.infty` — it was unconditionally
    # overwritten with None below, and np.infty is gone in NumPy 2.)
    X = as_float_array(X, copy=copy_x)
    tol = _tolerance(X, tol)

    if hasattr(init, "__array__"):
        init = check_array(init, dtype=X.dtype.type, copy=True)
        _validate_center_shape(X, n_clusters, init)

        if n_init != 1:
            warnings.warn(
                "Explicit initial center position passed: "
                "performing only one init in k-means instead of n_init=%d" % n_init,
                RuntimeWarning,
                stacklevel=2,
            )
            n_init = 1

    # defaults
    best_centers = None
    best_labels = None
    best_weights = None
    best_concentrations = None
    best_posterior = None
    best_inertia = None

    if n_jobs == 1:
        # For a single thread, less memory is needed if we just store one set
        # of the best results (as opposed to one set per run per thread).
        for it in range(n_init):
            # cluster on the sphere
            (centers, weights, concentrations, posterior, labels, inertia) = _movMF(
                X,
                n_clusters,
                posterior_type=posterior_type,
                force_weights=force_weights,
                max_iter=max_iter,
                verbose=verbose,
                init=init,
                random_state=random_state,
                tol=tol,
            )

            # determine if these results are the best so far
            if best_inertia is None or inertia < best_inertia:
                best_centers = centers.copy()
                best_labels = labels.copy()
                best_weights = weights.copy()
                best_concentrations = concentrations.copy()
                best_posterior = posterior.copy()
                best_inertia = inertia
    else:
        # parallelisation of movMF runs
        seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init)
        results = Parallel(n_jobs=n_jobs, verbose=0)(
            delayed(_movMF)(
                X,
                n_clusters,
                posterior_type=posterior_type,
                force_weights=force_weights,
                max_iter=max_iter,
                verbose=verbose,
                init=init,
                # BUG FIX: seed each run independently. Previously the same
                # ``random_state`` object was shipped to every worker (and
                # ``seeds`` was unused), so all n_init parallel runs were
                # identical and n_init > 1 bought nothing.
                random_state=seed,
                tol=tol,
            )
            for seed in seeds
        )

        # Get results with the lowest inertia
        centers, weights, concentrations, posteriors, labels, inertia = zip(*results)
        best = np.argmin(inertia)
        best_labels = labels[best]
        best_inertia = inertia[best]
        best_centers = centers[best]
        best_concentrations = concentrations[best]
        best_posterior = posteriors[best]
        best_weights = weights[best]

    return (
        best_centers,
        best_labels,
        best_inertia,
        best_weights,
        best_concentrations,
        best_posterior,
    )
class VonMisesFisherMixture(BaseEstimator, ClusterMixin, TransformerMixin):
"""Estimator for Mixture of von Mises Fisher clustering on the unit sphere.
Implements the algorithms (i) and (ii) from
"Clustering on the Unit Hypersphere using von Mises-Fisher Distributions"
by Banerjee, Dhillon, Ghosh, and Sra.
TODO: Currently only supports Banerjee et al 2005 approximation of kappa,
however, there are numerous other approximations see _update_params.
Attribution
----------
Approximation of log-vmf distribution function from movMF R-package.
movMF: An R Package for Fitting Mixtures of von Mises-Fisher Distributions
by Kurt Hornik, Bettina Grun, 2014
Find more at:
https://cran.r-project.org/web/packages/movMF/vignettes/movMF.pdf
https://cran.r-project.org/web/packages/movMF/index.html
Basic sklearn scaffolding from sklearn.cluster.KMeans.
Parameters
----------
n_clusters : int, optional, default: 8
The number of clusters to form as well as the number of
centroids to generate.
posterior_type: 'soft' or 'hard'
Type of posterior computed in exepectation step.
See note about attribute: self.posterior_
force_weights : None or array [n_clusters, ]
If None, the algorithm will estimate the weights.
If an array of weights, algorithm will estimate concentrations and
centers with given weights.
max_iter : int, default: 300
Maximum number of iterations of the k-means algorithm for a
single run.
n_init : int, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
init: (string) one of
random-class [default]: random class assignment & centroid computation
k-means++ : uses sklearn k-means++ initialization algorithm
spherical-k-means : use centroids from one pass of spherical k-means
random : random unit norm vectors
random-orthonormal : random orthonormal vectors
If an ndarray is passed, it should be of shape (n_clusters, n_features)
and gives the initial centers.
tol : float, default: 1e-6
Relative tolerance with regards to inertia to declare convergence
n_jobs : int
The number of jobs to use for the computation. This works by computing
each of the n_init runs in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
verbose : int, default 0
Verbosity mode.
copy_x : boolean, default True
When pre-computing distances it is more numerically accurate to center
the data first. If copy_x is True, then the original data is not
modified. If False, the original data is modified, and put back before
the function returns, but small numerical differences may be introduced
by subtracting and then adding the data mean.
normalize : boolean, default True
Normalize the input to have unnit norm.
Attributes
----------
cluster_centers_ : array, [n_clusters, n_features]
Coordinates of cluster centers
labels_ :
Labels of each point
inertia_ : float
Sum of distances of samples to their closest cluster center.
weights_ : array, [n_clusters,]
Weights of each cluster in vMF distribution (alpha).
concentrations_ : array [n_clusters,]
Concentration parameter for each cluster (kappa).
Larger values correspond to more concentrated clusters.
posterior_ : array, [n_clusters, n_examples]
Each column corresponds to the posterio distribution for and example.
If posterior_type='hard' is used, there will only be one non-zero per
column, its index corresponding to the example's cluster label.
If posterior_type='soft' is used, this matrix will be dense and the
column values correspond to soft clustering weights.
"""
def __init__(
self,
n_clusters=5,
posterior_type="soft",
force_weights=None,
n_init=10,
n_jobs=1,
max_iter=300,
verbose=False,
init="random-class",
random_state=None,
tol=1e-6,
copy_x=True,
normalize=True,
):
self.n_clusters = n_clusters
self.posterior_type = posterior_type
self.force_weights = force_weights
self.n_init = n_init
self.n_jobs = n_jobs
self.max_iter = max_iter
self.verbose = verbose
self.init = init
self.random_state = random_state
self.tol = tol
self.copy_x = copy_x
self.normalize = normalize
def _check_force_weights(self):
if self.force_weights is None:
return
if len(self.force_weights) != self.n_clusters:
raise ValueError(
(
"len(force_weights)={} but must equal "
"n_clusters={}".format(len(self.force_weights), self.n_clusters)
)
)
def _check_fit_data(self, X):
"""Verify that the number of samples given is larger than k"""
X = check_array(X, accept_sparse="csr", dtype=[np.float64, np.float32])
n_samples, n_features = X.shape
if X.shape[0] < self.n_clusters:
raise ValueError(
"n_samples=%d should be >= n_clusters=%d"
% (X.shape[0], self.n_clusters)
)
for ee in range(n_samples):
if sp.issparse(X):
n = sp.linalg.norm(X[ee, :])
else:
n = np.linalg.norm(X[ee, :])
if np.abs(n - 1.) > 1e-4:
raise ValueError("Data l2-norm must be 1, found {}".format(n))
return X
def _check_test_data(self, X):
X = check_array(X, accept_sparse="csr", dtype=FLOAT_DTYPES, warn_on_dtype=True)
n_samples, n_features = X.shape
expected_n_features = self.cluster_centers_.shape[1]
if not n_features == expected_n_features:
raise ValueError(
"Incorrect number of features. "
"Got %d features, expected %d" % (n_features, expected_n_features)
)
for ee in range(n_samples):
if sp.issparse(X):
n = sp.linalg.norm(X[ee, :])
else:
n = np.linalg.norm(X[ee, :])
if np.abs(n - 1.) > 1e-4:
raise ValueError("Data l2-norm must be 1, found {}".format(n))
return X
def fit(self, X, y=None):
"""Compute mixture of von Mises Fisher clustering.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
"""
if self.normalize:
X = normalize(X)
self._check_force_weights()
random_state = check_random_state(self.random_state)
X = self._check_fit_data(X)
(
self.cluster_centers_,
self.labels_,
self.inertia_,
self.weights_,
self.concentrations_,
self.posterior_,
) = movMF(
X,
self.n_clusters,
posterior_type=self.posterior_type,
force_weights=self.force_weights,
n_init=self.n_init,
n_jobs=self.n_jobs,
max_iter=self.max_iter,
verbose=self.verbose,
init=self.init,
random_state=random_state,
tol=self.tol,
copy_x=self.copy_x,
)
return self
def fit_predict(self, X, y=None):
"""Compute cluster centers and predict cluster index for each sample.
Convenience method; equivalent to calling fit(X) followed by
predict(X).
"""
return self.fit(X).labels_
def fit_transform(self, X, y=None):
"""Compute clustering and transform X to cluster-distance space.
Equivalent to fit(X).transform(X), but more efficiently implemented.
"""
# Currently, this just skips a copy of the data if it is not in
# np.array or CSR format already.
# XXX This skips _check_test_data, which may change the dtype;
# we should refactor the input validation.
return self.fit(X)._transform(X)
def transform(self, X, y=None):
"""Transform X to a cluster-distance space.
In the new space, each dimension is the cosine distance to the cluster
centers. Note that even if X is sparse, the array returned by
`transform` will typically be dense.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data to transform.
Returns
-------
X_new : array, shape [n_samples, k]
X transformed in the new space.
"""
if self.normalize:
X = normalize(X)
check_is_fitted(self, "cluster_centers_")
X = self._check_test_data(X)
return self._transform(X)
def _transform(self, X):
"""guts of transform method; no input validation"""
return cosine_distances(X, self.cluster_centers_)
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
In the vector quantization literature, `cluster_centers_` is called
the code book and each value returned by `predict` is the index of
the closest code in the code book.
Note: Does not check that each point is on the sphere.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data to predict.
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
"""
if self.normalize:
X = normalize(X)
check_is_fitted(self, "cluster_centers_")
X = self._check_test_data(X)
return _labels_inertia(X, self.cluster_centers_)[0]
def score(self, X, y=None):
"""Inertia score (sum of all distances to closest cluster).
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data.
Returns
-------
score : float
Larger score is better.
"""
if self.normalize:
X = normalize(X)
check_is_fitted(self, "cluster_centers_")
X = self._check_test_data(X)
return -_labels_inertia(X, self.cluster_centers_)[1]
def log_likelihood(self, X):
    """Return the per-cluster posterior responsibilities for X.

    NOTE(review): despite the name, ``_log_likelihood`` returns the
    (n_clusters, n_examples) posterior matrix, not a scalar
    log-likelihood — confirm against callers before renaming.
    """
    check_is_fitted(self, "cluster_centers_")
    return _log_likelihood(
        X, self.cluster_centers_, self.weights_, self.concentrations_
    )
|
jasonlaska/spherecluster | spherecluster/von_mises_fisher_mixture.py | movMF | python | def movMF(
X,
n_clusters,
posterior_type="soft",
force_weights=None,
n_init=10,
n_jobs=1,
max_iter=300,
verbose=False,
init="random-class",
random_state=None,
tol=1e-6,
copy_x=True,
):
if n_init <= 0:
raise ValueError(
"Invalid number of initializations."
" n_init=%d must be bigger than zero." % n_init
)
random_state = check_random_state(random_state)
if max_iter <= 0:
raise ValueError(
"Number of iterations should be a positive number,"
" got %d instead" % max_iter
)
best_inertia = np.infty
X = as_float_array(X, copy=copy_x)
tol = _tolerance(X, tol)
if hasattr(init, "__array__"):
init = check_array(init, dtype=X.dtype.type, copy=True)
_validate_center_shape(X, n_clusters, init)
if n_init != 1:
warnings.warn(
"Explicit initial center position passed: "
"performing only one init in k-means instead of n_init=%d" % n_init,
RuntimeWarning,
stacklevel=2,
)
n_init = 1
# defaults
best_centers = None
best_labels = None
best_weights = None
best_concentrations = None
best_posterior = None
best_inertia = None
if n_jobs == 1:
# For a single thread, less memory is needed if we just store one set
# of the best results (as opposed to one set per run per thread).
for it in range(n_init):
# cluster on the sphere
(centers, weights, concentrations, posterior, labels, inertia) = _movMF(
X,
n_clusters,
posterior_type=posterior_type,
force_weights=force_weights,
max_iter=max_iter,
verbose=verbose,
init=init,
random_state=random_state,
tol=tol,
)
# determine if these results are the best so far
if best_inertia is None or inertia < best_inertia:
best_centers = centers.copy()
best_labels = labels.copy()
best_weights = weights.copy()
best_concentrations = concentrations.copy()
best_posterior = posterior.copy()
best_inertia = inertia
else:
# parallelisation of movMF runs
seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init)
results = Parallel(n_jobs=n_jobs, verbose=0)(
delayed(_movMF)(
X,
n_clusters,
posterior_type=posterior_type,
force_weights=force_weights,
max_iter=max_iter,
verbose=verbose,
init=init,
random_state=random_state,
tol=tol,
)
for seed in seeds
)
# Get results with the lowest inertia
centers, weights, concentrations, posteriors, labels, inertia = zip(*results)
best = np.argmin(inertia)
best_labels = labels[best]
best_inertia = inertia[best]
best_centers = centers[best]
best_concentrations = concentrations[best]
best_posterior = posteriors[best]
best_weights = weights[best]
return (
best_centers,
best_labels,
best_inertia,
best_weights,
best_concentrations,
best_posterior,
) | Wrapper for parallelization of _movMF and running n_init times. | train | https://github.com/jasonlaska/spherecluster/blob/701b0b1909088a56e353b363b2672580d4fe9d93/spherecluster/von_mises_fisher_mixture.py#L500-L614 | [
"def _movMF(\n X,\n n_clusters,\n posterior_type=\"soft\",\n force_weights=None,\n max_iter=300,\n verbose=False,\n init=\"random-class\",\n random_state=None,\n tol=1e-6,\n):\n \"\"\"Mixture of von Mises Fisher clustering.\n\n Implements the algorithms (i) and (ii) from\n\n \"... | import warnings
import numpy as np
import scipy.sparse as sp
from scipy.special import iv # modified Bessel function of first kind, I_v
from numpy import i0 # modified Bessel function of first kind order 0, I_0
from scipy.special import logsumexp
from sklearn.base import BaseEstimator, ClusterMixin, TransformerMixin
from sklearn.cluster.k_means_ import _init_centroids, _tolerance, _validate_center_shape
from sklearn.utils.validation import FLOAT_DTYPES
from sklearn.utils.validation import check_is_fitted
from sklearn.utils import check_array, check_random_state, as_float_array
from sklearn.preprocessing import normalize
from sklearn.utils.extmath import squared_norm
from sklearn.metrics.pairwise import cosine_distances
from sklearn.externals.joblib import Parallel, delayed
from . import spherical_kmeans
# Cap applied in _maximization when rbar ~ 1 would make the kappa estimate
# blow up.  (Spelling kept as-is; renaming would break existing references.)
MAX_CONTENTRATION = 1e10
def _inertia_from_labels(X, centers, labels):
"""Compute inertia with cosine distance using known labels.
"""
n_examples, n_features = X.shape
inertia = np.zeros((n_examples,))
for ee in range(n_examples):
inertia[ee] = 1 - X[ee, :].dot(centers[int(labels[ee]), :].T)
return np.sum(inertia)
def _labels_inertia(X, centers):
"""Compute labels and inertia with cosine distance.
"""
n_examples, n_features = X.shape
n_clusters, n_features = centers.shape
labels = np.zeros((n_examples,))
inertia = np.zeros((n_examples,))
for ee in range(n_examples):
dists = np.zeros((n_clusters,))
for cc in range(n_clusters):
dists[cc] = 1 - X[ee, :].dot(centers[cc, :].T)
labels[ee] = np.argmin(dists)
inertia[ee] = dists[int(labels[ee])]
return labels, np.sum(inertia)
def _vmf_log(X, kappa, mu):
    """Compute log(vMF(X; mu, kappa)) using built-in numpy/scipy Bessel
    approximations.

    Works well on small kappa and dimension; _vmf_log_asymptotic covers the
    large-parameter regime.

    Parameters
    ----------
    X : array, shape (n_examples, n_features)
        Unit-norm observations.
    kappa : float
        Concentration parameter.
    mu : array, shape (n_features,)
        Unit-norm mean direction.
    """
    n_examples, n_features = X.shape
    # BUGFIX: stay in log space.  The previous log(C(kappa) * exp(kappa*x.mu))
    # overflows exp() for moderately large kappa * x.mu, while the equivalent
    # log(C(kappa)) + kappa * x.mu does not.
    return np.log(_vmf_normalize(kappa, n_features)) + kappa * X.dot(mu).T
def _vmf_normalize(kappa, dim):
"""Compute normalization constant using built-in numpy/scipy Bessel
approximations.
Works well on small kappa and mu.
"""
num = np.power(kappa, dim / 2. - 1.)
if dim / 2. - 1. < 1e-15:
denom = np.power(2. * np.pi, dim / 2.) * i0(kappa)
else:
denom = np.power(2. * np.pi, dim / 2.) * iv(dim / 2. - 1., kappa)
if np.isinf(num):
raise ValueError("VMF scaling numerator was inf.")
if np.isinf(denom):
raise ValueError("VMF scaling denominator was inf.")
if np.abs(denom) < 1e-15:
raise ValueError("VMF scaling denominator was 0.")
return num / denom
def _log_H_asymptotic(nu, kappa):
    """Compute the Amos-type upper bound asymptotic approximation on H where
    log(H_\nu)(\kappa) = \int_0^\kappa R_\nu(t) dt.

    See "lH_asymptotic <-" in movMF.R and utility function implementation notes
    from https://cran.r-project.org/web/packages/movMF/index.html
    """
    # (nu + 0.5)**2 is non-negative, so this is simply |nu + 0.5|.
    beta = np.sqrt((nu + 0.5) ** 2)
    # Switch point between the two pieces of the bound (mirrors movMF.R).
    kappa_l = np.min([kappa, np.sqrt((3. * nu + 11. / 2.) * (nu + 3. / 2.))])
    return _S(kappa, nu + 0.5, beta) + (
        _S(kappa_l, nu, nu + 2.) - _S(kappa_l, nu + 0.5, beta)
    )
def _S(kappa, alpha, beta):
"""Compute the antiderivative of the Amos-type bound G on the modified
Bessel function ratio.
Note: Handles scalar kappa, alpha, and beta only.
See "S <-" in movMF.R and utility function implementation notes from
https://cran.r-project.org/web/packages/movMF/index.html
"""
kappa = 1. * np.abs(kappa)
alpha = 1. * alpha
beta = 1. * np.abs(beta)
a_plus_b = alpha + beta
u = np.sqrt(kappa ** 2 + beta ** 2)
if alpha == 0:
alpha_scale = 0
else:
alpha_scale = alpha * np.log((alpha + u) / a_plus_b)
return u - beta - alpha_scale
def _vmf_log_asymptotic(X, kappa, mu):
    """Compute log(f(x|theta)) via the Amos approximation

        log(f(x|theta)) = theta' x - log(H_{d/2-1})(\|theta\|)

    where theta = kappa * mu and \|theta\| = kappa.

    More stable than _vmf_log for large values of kappa and n_features.

    See utility function implementation notes in movMF.R from
    https://cran.r-project.org/web/packages/movMF/index.html
    """
    n_features = X.shape[1]
    log_norm = _log_H_asymptotic(n_features / 2. - 1., kappa)
    return kappa * X.dot(mu).T - log_norm
def _log_likelihood(X, centers, weights, concentrations):
    """Posterior responsibility of each cluster for each example.

    Parameters
    ----------
    X : array, shape (n_examples, n_features) or (n_features,)
        A 1-D input is promoted to a single-row matrix.
    centers : array, shape (n_clusters, n_features)
    weights : array, shape (n_clusters,)
    concentrations : array, shape (n_clusters,)

    Returns
    -------
    posterior : array, shape (n_clusters, n_examples)
        Column ee holds the soft posterior over clusters for example ee.
    """
    if len(np.shape(X)) != 2:
        X = X.reshape((1, len(X)))

    n_examples, n_features = np.shape(X)
    n_clusters, _ = centers.shape

    # Built-in Bessel evaluation loses precision beyond ~50 features; switch
    # to the Amos-type asymptotic approximation there.
    if n_features <= 50:
        vmf_f = _vmf_log
    else:
        vmf_f = _vmf_log_asymptotic

    f_log = np.zeros((n_clusters, n_examples))
    for cc in range(n_clusters):
        f_log[cc, :] = vmf_f(X, concentrations[cc], centers[cc, :])

    # log prior + log likelihood, normalized per example with logsumexp for
    # numerical stability.  (Removed a dead np.zeros allocation that was
    # immediately overwritten here.)
    weights_log = np.log(weights)
    posterior = np.tile(weights_log.T, (n_examples, 1)).T + f_log
    for ee in range(n_examples):
        posterior[:, ee] = np.exp(posterior[:, ee] - logsumexp(posterior[:, ee]))

    return posterior
def _init_unit_centers(X, n_clusters, random_state, init):
"""Initializes unit norm centers.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
n_clusters : int, optional, default: 8
The number of clusters to form as well as the number of
centroids to generate.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
init: (string) one of
k-means++ : uses sklearn k-means++ initialization algorithm
spherical-k-means : use centroids from one pass of spherical k-means
random : random unit norm vectors
random-orthonormal : random orthonormal vectors
If an ndarray is passed, it should be of shape (n_clusters, n_features)
and gives the initial centers.
"""
n_examples, n_features = np.shape(X)
if isinstance(init, np.ndarray):
n_init_clusters, n_init_features = init.shape
assert n_init_clusters == n_clusters
assert n_init_features == n_features
# ensure unit normed centers
centers = init
for cc in range(n_clusters):
centers[cc, :] = centers[cc, :] / np.linalg.norm(centers[cc, :])
return centers
elif init == "spherical-k-means":
labels, inertia, centers, iters = spherical_kmeans._spherical_kmeans_single_lloyd(
X, n_clusters, x_squared_norms=np.ones((n_examples,)), init="k-means++"
)
return centers
elif init == "random":
centers = np.random.randn(n_clusters, n_features)
for cc in range(n_clusters):
centers[cc, :] = centers[cc, :] / np.linalg.norm(centers[cc, :])
return centers
elif init == "k-means++":
centers = _init_centroids(
X,
n_clusters,
"k-means++",
random_state=random_state,
x_squared_norms=np.ones((n_examples,)),
)
for cc in range(n_clusters):
centers[cc, :] = centers[cc, :] / np.linalg.norm(centers[cc, :])
return centers
elif init == "random-orthonormal":
centers = np.random.randn(n_clusters, n_features)
q, r = np.linalg.qr(centers.T, mode="reduced")
return q.T
elif init == "random-class":
centers = np.zeros((n_clusters, n_features))
for cc in range(n_clusters):
while np.linalg.norm(centers[cc, :]) == 0:
labels = np.random.randint(0, n_clusters, n_examples)
centers[cc, :] = X[labels == cc, :].sum(axis=0)
for cc in range(n_clusters):
centers[cc, :] = centers[cc, :] / np.linalg.norm(centers[cc, :])
return centers
def _expectation(X, centers, weights, concentrations, posterior_type="soft"):
    """Compute the log-likelihood of each datapoint being in each cluster.

    Parameters
    ----------
    centers (mu) : array, [n_centers x n_features]
    weights (alpha) : array, [n_centers, ] (alpha)
    concentrations (kappa) : array, [n_centers, ]

    Returns
    ----------
    posterior : array, [n_centers, n_examples]
    """
    n_examples, n_features = np.shape(X)
    n_clusters, _ = centers.shape

    if n_features <= 50:  # works up to about 50 before numerically unstable
        vmf_f = _vmf_log
    else:
        vmf_f = _vmf_log_asymptotic

    # Per-cluster log-density of every example.
    f_log = np.zeros((n_clusters, n_examples))
    for cc in range(n_clusters):
        f_log[cc, :] = vmf_f(X, concentrations[cc], centers[cc, :])

    posterior = np.zeros((n_clusters, n_examples))
    if posterior_type == "soft":
        # Soft assignment: normalized responsibilities, computed in log space
        # (logsumexp) for numerical stability.
        weights_log = np.log(weights)
        posterior = np.tile(weights_log.T, (n_examples, 1)).T + f_log
        for ee in range(n_examples):
            posterior[:, ee] = np.exp(posterior[:, ee] - logsumexp(posterior[:, ee]))

    elif posterior_type == "hard":
        # Hard assignment: one-hot column per example at the argmax of the
        # weighted log-density.
        weights_log = np.log(weights)
        weighted_f_log = np.tile(weights_log.T, (n_examples, 1)).T + f_log
        for ee in range(n_examples):
            posterior[np.argmax(weighted_f_log[:, ee]), ee] = 1.0

    return posterior
def _maximization(X, posterior, force_weights=None):
    """Estimate new centers, weights, and concentrations from

    Parameters
    ----------
    posterior : array, [n_centers, n_examples]
        The posterior matrix from the expectation step.

    force_weights : None or array, [n_centers, ]
        If None is passed, will estimate weights.
        If an array is passed, will use instead of estimating.

    Returns
    ----------
    centers (mu) : array, [n_centers x n_features]
    weights (alpha) : array, [n_centers, ] (alpha)
    concentrations (kappa) : array, [n_centers, ]
    """
    n_examples, n_features = X.shape
    n_clusters, n_examples = posterior.shape
    concentrations = np.zeros((n_clusters,))
    centers = np.zeros((n_clusters, n_features))
    if force_weights is None:
        weights = np.zeros((n_clusters,))

    for cc in range(n_clusters):
        # update weights (alpha)
        if force_weights is None:
            weights[cc] = np.mean(posterior[cc, :])
        else:
            weights = force_weights

        # update centers (mu): responsibility-weighted sum of the examples.
        X_scaled = X.copy()
        if sp.issparse(X):
            # Scale each CSR row in place: repeat each row's weight once per
            # stored nonzero in that row.
            X_scaled.data *= posterior[cc, :].repeat(np.diff(X_scaled.indptr))
        else:
            for ee in range(n_examples):
                X_scaled[ee, :] *= posterior[cc, ee]

        centers[cc, :] = X_scaled.sum(axis=0)

        # normalize centers
        center_norm = np.linalg.norm(centers[cc, :])
        if center_norm > 1e-8:
            centers[cc, :] = centers[cc, :] / center_norm

        # update concentration (kappa) [TODO: add other kappa approximations]
        # rbar is the mean resultant length; this is the Banerjee et al. 2005
        # closed-form kappa approximation (per the module docstrings).
        rbar = center_norm / (n_examples * weights[cc])
        concentrations[cc] = rbar * n_features - np.power(rbar, 3.)
        # rbar -> 1 makes the estimate singular; clamp to a large constant.
        if np.abs(rbar - 1.0) < 1e-10:
            concentrations[cc] = MAX_CONTENTRATION
        else:
            concentrations[cc] /= 1. - np.power(rbar, 2.)

        # let python know we can free this (good for large dense X)
        del X_scaled

    return centers, weights, concentrations
def _movMF(
    X,
    n_clusters,
    posterior_type="soft",
    force_weights=None,
    max_iter=300,
    verbose=False,
    init="random-class",
    random_state=None,
    tol=1e-6,
):
    """Single EM run of mixture of von Mises Fisher clustering.

    Implements the algorithms (i) and (ii) from

    "Clustering on the Unit Hypersphere using von Mises-Fisher Distributions"
    by Banerjee, Dhillon, Ghosh, and Sra.

    TODO: Currently only supports Banerjee et al 2005 approximation of kappa,
          however, there are numerous other approximations see _update_params.

    Attribution
    ----------
    Approximation of log-vmf distribution function from movMF R-package.

    movMF: An R Package for Fitting Mixtures of von Mises-Fisher Distributions
    by Kurt Hornik, Bettina Grun, 2014

    Find more at:
      https://cran.r-project.org/web/packages/movMF/vignettes/movMF.pdf
      https://cran.r-project.org/web/packages/movMF/index.html

    Parameters
    ----------
    n_clusters : int, optional, default: 8
        The number of clusters to form as well as the number of
        centroids to generate.

    posterior_type: 'soft' or 'hard'
        Type of posterior computed in expectation step.

    force_weights : None or array [n_clusters, ]
        If None, the algorithm will estimate the weights.
        If an array of weights, algorithm will estimate concentrations and
        centers with given weights.

    max_iter : int, default: 300
        Maximum number of EM iterations for this single run.

    init: (string) one of
        random-class [default]: random class assignment & centroid computation
        k-means++ : uses sklearn k-means++ initialization algorithm
        spherical-k-means : use centroids from one pass of spherical k-means
        random : random unit norm vectors
        random-orthonormal : random orthonormal vectors
        If an ndarray is passed, it should be of shape (n_clusters, n_features)
        and gives the initial centers.

    tol : float, default: 1e-6
        Convergence tolerance on the squared shift of the centers.

    random_state : integer or numpy.RandomState, optional
        The generator used to initialize the centers. If an integer is
        given, it fixes the seed.

    verbose : int, default 0
        Verbosity mode.

    (n_init, n_jobs and copy_x are parameters of the movMF wrapper, which
    calls this function once per restart; they do not apply here.)
    """
    random_state = check_random_state(random_state)
    n_examples, n_features = np.shape(X)

    # init centers (mus)
    centers = _init_unit_centers(X, n_clusters, random_state, init)

    # init weights (alphas): uniform prior unless fixed by the caller.
    if force_weights is None:
        weights = np.ones((n_clusters,))
        weights = weights / np.sum(weights)
    else:
        weights = force_weights

    # init concentrations (kappas)
    concentrations = np.ones((n_clusters,))

    if verbose:
        print("Initialization complete")

    for iter in range(max_iter):
        centers_prev = centers.copy()

        # expectation step
        posterior = _expectation(
            X, centers, weights, concentrations, posterior_type=posterior_type
        )

        # maximization step
        centers, weights, concentrations = _maximization(
            X, posterior, force_weights=force_weights
        )

        # check convergence: squared Frobenius shift of the centers.
        tolcheck = squared_norm(centers_prev - centers)
        if tolcheck <= tol:
            if verbose:
                print(
                    "Converged at iteration %d: "
                    "center shift %e within tolerance %e" % (iter, tolcheck, tol)
                )
            break

    # labels come for free via posterior
    labels = np.zeros((n_examples,))
    for ee in range(n_examples):
        labels[ee] = np.argmax(posterior[:, ee])

    inertia = _inertia_from_labels(X, centers, labels)

    return centers, weights, concentrations, posterior, labels, inertia
class VonMisesFisherMixture(BaseEstimator, ClusterMixin, TransformerMixin):
"""Estimator for Mixture of von Mises Fisher clustering on the unit sphere.
Implements the algorithms (i) and (ii) from
"Clustering on the Unit Hypersphere using von Mises-Fisher Distributions"
by Banerjee, Dhillon, Ghosh, and Sra.
TODO: Currently only supports Banerjee et al 2005 approximation of kappa,
however, there are numerous other approximations see _update_params.
Attribution
----------
Approximation of log-vmf distribution function from movMF R-package.
movMF: An R Package for Fitting Mixtures of von Mises-Fisher Distributions
by Kurt Hornik, Bettina Grun, 2014
Find more at:
https://cran.r-project.org/web/packages/movMF/vignettes/movMF.pdf
https://cran.r-project.org/web/packages/movMF/index.html
Basic sklearn scaffolding from sklearn.cluster.KMeans.
Parameters
----------
n_clusters : int, optional, default: 8
The number of clusters to form as well as the number of
centroids to generate.
posterior_type: 'soft' or 'hard'
Type of posterior computed in expectation step.
See note about attribute: self.posterior_
force_weights : None or array [n_clusters, ]
If None, the algorithm will estimate the weights.
If an array of weights, algorithm will estimate concentrations and
centers with given weights.
max_iter : int, default: 300
Maximum number of iterations of the k-means algorithm for a
single run.
n_init : int, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
init: (string) one of
random-class [default]: random class assignment & centroid computation
k-means++ : uses sklearn k-means++ initialization algorithm
spherical-k-means : use centroids from one pass of spherical k-means
random : random unit norm vectors
random-orthonormal : random orthonormal vectors
If an ndarray is passed, it should be of shape (n_clusters, n_features)
and gives the initial centers.
tol : float, default: 1e-6
Relative tolerance with regards to inertia to declare convergence
n_jobs : int
The number of jobs to use for the computation. This works by computing
each of the n_init runs in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
verbose : int, default 0
Verbosity mode.
copy_x : boolean, default True
When pre-computing distances it is more numerically accurate to center
the data first. If copy_x is True, then the original data is not
modified. If False, the original data is modified, and put back before
the function returns, but small numerical differences may be introduced
by subtracting and then adding the data mean.
normalize : boolean, default True
Normalize the input to have unit norm.
Attributes
----------
cluster_centers_ : array, [n_clusters, n_features]
Coordinates of cluster centers
labels_ :
Labels of each point
inertia_ : float
Sum of distances of samples to their closest cluster center.
weights_ : array, [n_clusters,]
Weights of each cluster in vMF distribution (alpha).
concentrations_ : array [n_clusters,]
Concentration parameter for each cluster (kappa).
Larger values correspond to more concentrated clusters.
posterior_ : array, [n_clusters, n_examples]
Each column corresponds to the posterior distribution for an example.
If posterior_type='hard' is used, there will only be one non-zero per
column, its index corresponding to the example's cluster label.
If posterior_type='soft' is used, this matrix will be dense and the
column values correspond to soft clustering weights.
"""
def __init__(
    self,
    n_clusters=5,
    posterior_type="soft",
    force_weights=None,
    n_init=10,
    n_jobs=1,
    max_iter=300,
    verbose=False,
    init="random-class",
    random_state=None,
    tol=1e-6,
    copy_x=True,
    normalize=True,
):
    """Store hyperparameters verbatim (sklearn estimator convention);
    all estimation happens in fit()."""
    self.n_clusters = n_clusters
    self.posterior_type = posterior_type
    self.force_weights = force_weights
    self.n_init = n_init
    self.n_jobs = n_jobs
    self.max_iter = max_iter
    self.verbose = verbose
    self.init = init
    self.random_state = random_state
    self.tol = tol
    self.copy_x = copy_x
    self.normalize = normalize
def _check_force_weights(self):
    """Raise ValueError when force_weights is set but has the wrong length."""
    if self.force_weights is None:
        # Nothing to validate; weights will be estimated during fit().
        return

    n_weights = len(self.force_weights)
    if n_weights != self.n_clusters:
        raise ValueError(
            (
                "len(force_weights)={} but must equal "
                "n_clusters={}".format(n_weights, self.n_clusters)
            )
        )
def _check_fit_data(self, X):
    """Verify that the number of samples given is larger than k and that
    every row of X has unit L2 norm (within 1e-4).

    Returns the validated array (CSR format for sparse input).
    """
    X = check_array(X, accept_sparse="csr", dtype=[np.float64, np.float32])
    n_samples, n_features = X.shape
    if n_samples < self.n_clusters:
        raise ValueError(
            "n_samples=%d should be >= n_clusters=%d"
            % (n_samples, self.n_clusters)
        )

    # Vectorized row-norm check: one pass instead of a Python loop with an
    # O(n_features) norm computation per row.
    if sp.issparse(X):
        norms = np.sqrt(np.asarray(X.multiply(X).sum(axis=1)).ravel())
    else:
        norms = np.linalg.norm(X, axis=1)
    bad = np.where(np.abs(norms - 1.) > 1e-4)[0]
    if bad.size > 0:
        # Report the first offending row's norm, as before.
        raise ValueError("Data l2-norm must be 1, found {}".format(norms[bad[0]]))

    return X
def _check_test_data(self, X):
    """Validate X at predict/transform time: feature count must match the
    fitted centers and every row must have unit L2 norm (within 1e-4)."""
    X = check_array(X, accept_sparse="csr", dtype=FLOAT_DTYPES, warn_on_dtype=True)
    n_samples, n_features = X.shape
    expected_n_features = self.cluster_centers_.shape[1]
    if n_features != expected_n_features:
        raise ValueError(
            "Incorrect number of features. "
            "Got %d features, expected %d" % (n_features, expected_n_features)
        )

    for row in range(n_samples):
        if sp.issparse(X):
            row_norm = sp.linalg.norm(X[row, :])
        else:
            row_norm = np.linalg.norm(X[row, :])
        if np.abs(row_norm - 1.) > 1e-4:
            raise ValueError("Data l2-norm must be 1, found {}".format(row_norm))

    return X
def fit(self, X, y=None):
    """Compute mixture of von Mises Fisher clustering.

    Parameters
    ----------
    X : array-like or sparse matrix, shape=(n_samples, n_features)
        Rows must have unit L2 norm unless self.normalize is True, in which
        case they are normalized here first.
    y : ignored
        Present for sklearn API compatibility.
    """
    if self.normalize:
        X = normalize(X)

    self._check_force_weights()
    random_state = check_random_state(self.random_state)
    X = self._check_fit_data(X)

    # movMF runs the EM restarts and returns everything the estimator
    # exposes as fitted state.
    (
        self.cluster_centers_,
        self.labels_,
        self.inertia_,
        self.weights_,
        self.concentrations_,
        self.posterior_,
    ) = movMF(
        X,
        self.n_clusters,
        posterior_type=self.posterior_type,
        force_weights=self.force_weights,
        n_init=self.n_init,
        n_jobs=self.n_jobs,
        max_iter=self.max_iter,
        verbose=self.verbose,
        init=self.init,
        random_state=random_state,
        tol=self.tol,
        copy_x=self.copy_x,
    )

    return self
def fit_predict(self, X, y=None):
    """Compute cluster centers and predict cluster index for each sample.

    Convenience method; equivalent to calling fit(X) followed by
    predict(X).
    """
    # fit() already stores labels_, so no second pass over X is needed.
    self.fit(X)
    return self.labels_
def fit_transform(self, X, y=None):
    """Compute clustering and transform X to cluster-distance space.

    Equivalent to fit(X).transform(X), but more efficiently implemented.
    """
    # Calls _transform directly, skipping _check_test_data (and a possible
    # copy/dtype change) because fit() has just validated X.
    fitted = self.fit(X)
    return fitted._transform(X)
def transform(self, X, y=None):
    """Map X into cluster-distance space.

    Each output dimension is the cosine distance to one fitted cluster
    center, so the result is (n_samples, n_clusters) and typically dense
    even for sparse X.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape = [n_samples, n_features]
        New data to transform.

    Returns
    -------
    X_new : array, shape [n_samples, k]
        X transformed in the new space.
    """
    if self.normalize:
        X = normalize(X)

    check_is_fitted(self, "cluster_centers_")
    validated = self._check_test_data(X)
    return self._transform(validated)
def _transform(self, X):
    """Core of transform(); assumes X is already validated."""
    distances = cosine_distances(X, self.cluster_centers_)
    return distances
def predict(self, X):
    """Predict the closest cluster each sample in X belongs to.

    In the vector quantization literature, `cluster_centers_` is called
    the code book and each value returned by `predict` is the index of
    the closest code in the code book.

    Note: Does not check that each point is on the sphere.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape = [n_samples, n_features]
        New data to predict.

    Returns
    -------
    labels : array, shape [n_samples,]
        Index of the cluster each sample belongs to.
    """
    if self.normalize:
        X = normalize(X)

    check_is_fitted(self, "cluster_centers_")
    X = self._check_test_data(X)
    # _labels_inertia returns (labels, inertia); only labels are needed here.
    return _labels_inertia(X, self.cluster_centers_)[0]
def score(self, X, y=None):
    """Negated inertia (sum of cosine distances to the closest centers).

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape = [n_samples, n_features]
        New data.

    Returns
    -------
    score : float
        Larger score is better.
    """
    if self.normalize:
        X = normalize(X)

    check_is_fitted(self, "cluster_centers_")
    validated = self._check_test_data(X)
    labels, total_inertia = _labels_inertia(validated, self.cluster_centers_)
    return -total_inertia
def log_likelihood(self, X):
    """Return the per-cluster posterior responsibilities for X.

    NOTE(review): despite the name, ``_log_likelihood`` returns the
    (n_clusters, n_examples) posterior matrix, not a scalar
    log-likelihood — confirm against callers before renaming.
    """
    check_is_fitted(self, "cluster_centers_")
    return _log_likelihood(
        X, self.cluster_centers_, self.weights_, self.concentrations_
    )
|
jasonlaska/spherecluster | spherecluster/von_mises_fisher_mixture.py | VonMisesFisherMixture._check_fit_data | python | def _check_fit_data(self, X):
X = check_array(X, accept_sparse="csr", dtype=[np.float64, np.float32])
n_samples, n_features = X.shape
if X.shape[0] < self.n_clusters:
raise ValueError(
"n_samples=%d should be >= n_clusters=%d"
% (X.shape[0], self.n_clusters)
)
for ee in range(n_samples):
if sp.issparse(X):
n = sp.linalg.norm(X[ee, :])
else:
n = np.linalg.norm(X[ee, :])
if np.abs(n - 1.) > 1e-4:
raise ValueError("Data l2-norm must be 1, found {}".format(n))
return X | Verify that the number of samples given is larger than k | train | https://github.com/jasonlaska/spherecluster/blob/701b0b1909088a56e353b363b2672580d4fe9d93/spherecluster/von_mises_fisher_mixture.py#L772-L791 | null | class VonMisesFisherMixture(BaseEstimator, ClusterMixin, TransformerMixin):
"""Estimator for Mixture of von Mises Fisher clustering on the unit sphere.
Implements the algorithms (i) and (ii) from
"Clustering on the Unit Hypersphere using von Mises-Fisher Distributions"
by Banerjee, Dhillon, Ghosh, and Sra.
TODO: Currently only supports Banerjee et al 2005 approximation of kappa,
however, there are numerous other approximations see _update_params.
Attribution
----------
Approximation of log-vmf distribution function from movMF R-package.
movMF: An R Package for Fitting Mixtures of von Mises-Fisher Distributions
by Kurt Hornik, Bettina Grun, 2014
Find more at:
https://cran.r-project.org/web/packages/movMF/vignettes/movMF.pdf
https://cran.r-project.org/web/packages/movMF/index.html
Basic sklearn scaffolding from sklearn.cluster.KMeans.
Parameters
----------
n_clusters : int, optional, default: 8
The number of clusters to form as well as the number of
centroids to generate.
posterior_type: 'soft' or 'hard'
Type of posterior computed in expectation step.
See note about attribute: self.posterior_
force_weights : None or array [n_clusters, ]
If None, the algorithm will estimate the weights.
If an array of weights, algorithm will estimate concentrations and
centers with given weights.
max_iter : int, default: 300
Maximum number of iterations of the k-means algorithm for a
single run.
n_init : int, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
init: (string) one of
random-class [default]: random class assignment & centroid computation
k-means++ : uses sklearn k-means++ initialization algorithm
spherical-k-means : use centroids from one pass of spherical k-means
random : random unit norm vectors
random-orthonormal : random orthonormal vectors
If an ndarray is passed, it should be of shape (n_clusters, n_features)
and gives the initial centers.
tol : float, default: 1e-6
Relative tolerance with regards to inertia to declare convergence
n_jobs : int
The number of jobs to use for the computation. This works by computing
each of the n_init runs in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
verbose : int, default 0
Verbosity mode.
copy_x : boolean, default True
When pre-computing distances it is more numerically accurate to center
the data first. If copy_x is True, then the original data is not
modified. If False, the original data is modified, and put back before
the function returns, but small numerical differences may be introduced
by subtracting and then adding the data mean.
normalize : boolean, default True
Normalize the input to have unit norm.
Attributes
----------
cluster_centers_ : array, [n_clusters, n_features]
Coordinates of cluster centers
labels_ :
Labels of each point
inertia_ : float
Sum of distances of samples to their closest cluster center.
weights_ : array, [n_clusters,]
Weights of each cluster in vMF distribution (alpha).
concentrations_ : array [n_clusters,]
Concentration parameter for each cluster (kappa).
Larger values correspond to more concentrated clusters.
posterior_ : array, [n_clusters, n_examples]
Each column corresponds to the posterior distribution for an example.
If posterior_type='hard' is used, there will only be one non-zero per
column, its index corresponding to the example's cluster label.
If posterior_type='soft' is used, this matrix will be dense and the
column values correspond to soft clustering weights.
"""
def __init__(
    self,
    n_clusters=5,
    posterior_type="soft",
    force_weights=None,
    n_init=10,
    n_jobs=1,
    max_iter=300,
    verbose=False,
    init="random-class",
    random_state=None,
    tol=1e-6,
    copy_x=True,
    normalize=True,
):
    """Store hyperparameters verbatim (sklearn estimator convention);
    all estimation happens in fit()."""
    self.n_clusters = n_clusters
    self.posterior_type = posterior_type
    self.force_weights = force_weights
    self.n_init = n_init
    self.n_jobs = n_jobs
    self.max_iter = max_iter
    self.verbose = verbose
    self.init = init
    self.random_state = random_state
    self.tol = tol
    self.copy_x = copy_x
    self.normalize = normalize
def _check_force_weights(self):
    """Raise ValueError when force_weights is set but has the wrong length."""
    if self.force_weights is None:
        # Nothing to validate; weights will be estimated during fit().
        return

    if len(self.force_weights) != self.n_clusters:
        raise ValueError(
            (
                "len(force_weights)={} but must equal "
                "n_clusters={}".format(len(self.force_weights), self.n_clusters)
            )
        )
def _check_test_data(self, X):
X = check_array(X, accept_sparse="csr", dtype=FLOAT_DTYPES, warn_on_dtype=True)
n_samples, n_features = X.shape
expected_n_features = self.cluster_centers_.shape[1]
if not n_features == expected_n_features:
raise ValueError(
"Incorrect number of features. "
"Got %d features, expected %d" % (n_features, expected_n_features)
)
for ee in range(n_samples):
if sp.issparse(X):
n = sp.linalg.norm(X[ee, :])
else:
n = np.linalg.norm(X[ee, :])
if np.abs(n - 1.) > 1e-4:
raise ValueError("Data l2-norm must be 1, found {}".format(n))
return X
def fit(self, X, y=None):
"""Compute mixture of von Mises Fisher clustering.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
"""
if self.normalize:
X = normalize(X)
self._check_force_weights()
random_state = check_random_state(self.random_state)
X = self._check_fit_data(X)
(
self.cluster_centers_,
self.labels_,
self.inertia_,
self.weights_,
self.concentrations_,
self.posterior_,
) = movMF(
X,
self.n_clusters,
posterior_type=self.posterior_type,
force_weights=self.force_weights,
n_init=self.n_init,
n_jobs=self.n_jobs,
max_iter=self.max_iter,
verbose=self.verbose,
init=self.init,
random_state=random_state,
tol=self.tol,
copy_x=self.copy_x,
)
return self
def fit_predict(self, X, y=None):
"""Compute cluster centers and predict cluster index for each sample.
Convenience method; equivalent to calling fit(X) followed by
predict(X).
"""
return self.fit(X).labels_
def fit_transform(self, X, y=None):
"""Compute clustering and transform X to cluster-distance space.
Equivalent to fit(X).transform(X), but more efficiently implemented.
"""
# Currently, this just skips a copy of the data if it is not in
# np.array or CSR format already.
# XXX This skips _check_test_data, which may change the dtype;
# we should refactor the input validation.
return self.fit(X)._transform(X)
def transform(self, X, y=None):
"""Transform X to a cluster-distance space.
In the new space, each dimension is the cosine distance to the cluster
centers. Note that even if X is sparse, the array returned by
`transform` will typically be dense.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data to transform.
Returns
-------
X_new : array, shape [n_samples, k]
X transformed in the new space.
"""
if self.normalize:
X = normalize(X)
check_is_fitted(self, "cluster_centers_")
X = self._check_test_data(X)
return self._transform(X)
def _transform(self, X):
"""guts of transform method; no input validation"""
return cosine_distances(X, self.cluster_centers_)
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
In the vector quantization literature, `cluster_centers_` is called
the code book and each value returned by `predict` is the index of
the closest code in the code book.
Note: Does not check that each point is on the sphere.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data to predict.
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
"""
if self.normalize:
X = normalize(X)
check_is_fitted(self, "cluster_centers_")
X = self._check_test_data(X)
return _labels_inertia(X, self.cluster_centers_)[0]
def score(self, X, y=None):
"""Inertia score (sum of all distances to closest cluster).
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data.
Returns
-------
score : float
Larger score is better.
"""
if self.normalize:
X = normalize(X)
check_is_fitted(self, "cluster_centers_")
X = self._check_test_data(X)
return -_labels_inertia(X, self.cluster_centers_)[1]
def log_likelihood(self, X):
check_is_fitted(self, "cluster_centers_")
return _log_likelihood(
X, self.cluster_centers_, self.weights_, self.concentrations_
)
|
jasonlaska/spherecluster | spherecluster/von_mises_fisher_mixture.py | VonMisesFisherMixture.fit | python | def fit(self, X, y=None):
if self.normalize:
X = normalize(X)
self._check_force_weights()
random_state = check_random_state(self.random_state)
X = self._check_fit_data(X)
(
self.cluster_centers_,
self.labels_,
self.inertia_,
self.weights_,
self.concentrations_,
self.posterior_,
) = movMF(
X,
self.n_clusters,
posterior_type=self.posterior_type,
force_weights=self.force_weights,
n_init=self.n_init,
n_jobs=self.n_jobs,
max_iter=self.max_iter,
verbose=self.verbose,
init=self.init,
random_state=random_state,
tol=self.tol,
copy_x=self.copy_x,
)
return self | Compute mixture of von Mises Fisher clustering.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features) | train | https://github.com/jasonlaska/spherecluster/blob/701b0b1909088a56e353b363b2672580d4fe9d93/spherecluster/von_mises_fisher_mixture.py#L814-L850 | [
"def movMF(\n X,\n n_clusters,\n posterior_type=\"soft\",\n force_weights=None,\n n_init=10,\n n_jobs=1,\n max_iter=300,\n verbose=False,\n init=\"random-class\",\n random_state=None,\n tol=1e-6,\n copy_x=True,\n):\n \"\"\"Wrapper for parallelization of _movMF and running n_in... | class VonMisesFisherMixture(BaseEstimator, ClusterMixin, TransformerMixin):
"""Estimator for Mixture of von Mises Fisher clustering on the unit sphere.
Implements the algorithms (i) and (ii) from
"Clustering on the Unit Hypersphere using von Mises-Fisher Distributions"
by Banerjee, Dhillon, Ghosh, and Sra.
TODO: Currently only supports Banerjee et al 2005 approximation of kappa,
however, there are numerous other approximations see _update_params.
Attribution
----------
Approximation of log-vmf distribution function from movMF R-package.
movMF: An R Package for Fitting Mixtures of von Mises-Fisher Distributions
by Kurt Hornik, Bettina Grun, 2014
Find more at:
https://cran.r-project.org/web/packages/movMF/vignettes/movMF.pdf
https://cran.r-project.org/web/packages/movMF/index.html
Basic sklearn scaffolding from sklearn.cluster.KMeans.
Parameters
----------
n_clusters : int, optional, default: 8
The number of clusters to form as well as the number of
centroids to generate.
posterior_type: 'soft' or 'hard'
Type of posterior computed in exepectation step.
See note about attribute: self.posterior_
force_weights : None or array [n_clusters, ]
If None, the algorithm will estimate the weights.
If an array of weights, algorithm will estimate concentrations and
centers with given weights.
max_iter : int, default: 300
Maximum number of iterations of the k-means algorithm for a
single run.
n_init : int, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
init: (string) one of
random-class [default]: random class assignment & centroid computation
k-means++ : uses sklearn k-means++ initialization algorithm
spherical-k-means : use centroids from one pass of spherical k-means
random : random unit norm vectors
random-orthonormal : random orthonormal vectors
If an ndarray is passed, it should be of shape (n_clusters, n_features)
and gives the initial centers.
tol : float, default: 1e-6
Relative tolerance with regards to inertia to declare convergence
n_jobs : int
The number of jobs to use for the computation. This works by computing
each of the n_init runs in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
verbose : int, default 0
Verbosity mode.
copy_x : boolean, default True
When pre-computing distances it is more numerically accurate to center
the data first. If copy_x is True, then the original data is not
modified. If False, the original data is modified, and put back before
the function returns, but small numerical differences may be introduced
by subtracting and then adding the data mean.
normalize : boolean, default True
Normalize the input to have unnit norm.
Attributes
----------
cluster_centers_ : array, [n_clusters, n_features]
Coordinates of cluster centers
labels_ :
Labels of each point
inertia_ : float
Sum of distances of samples to their closest cluster center.
weights_ : array, [n_clusters,]
Weights of each cluster in vMF distribution (alpha).
concentrations_ : array [n_clusters,]
Concentration parameter for each cluster (kappa).
Larger values correspond to more concentrated clusters.
posterior_ : array, [n_clusters, n_examples]
Each column corresponds to the posterio distribution for and example.
If posterior_type='hard' is used, there will only be one non-zero per
column, its index corresponding to the example's cluster label.
If posterior_type='soft' is used, this matrix will be dense and the
column values correspond to soft clustering weights.
"""
def __init__(
self,
n_clusters=5,
posterior_type="soft",
force_weights=None,
n_init=10,
n_jobs=1,
max_iter=300,
verbose=False,
init="random-class",
random_state=None,
tol=1e-6,
copy_x=True,
normalize=True,
):
self.n_clusters = n_clusters
self.posterior_type = posterior_type
self.force_weights = force_weights
self.n_init = n_init
self.n_jobs = n_jobs
self.max_iter = max_iter
self.verbose = verbose
self.init = init
self.random_state = random_state
self.tol = tol
self.copy_x = copy_x
self.normalize = normalize
def _check_force_weights(self):
if self.force_weights is None:
return
if len(self.force_weights) != self.n_clusters:
raise ValueError(
(
"len(force_weights)={} but must equal "
"n_clusters={}".format(len(self.force_weights), self.n_clusters)
)
)
def _check_fit_data(self, X):
"""Verify that the number of samples given is larger than k"""
X = check_array(X, accept_sparse="csr", dtype=[np.float64, np.float32])
n_samples, n_features = X.shape
if X.shape[0] < self.n_clusters:
raise ValueError(
"n_samples=%d should be >= n_clusters=%d"
% (X.shape[0], self.n_clusters)
)
for ee in range(n_samples):
if sp.issparse(X):
n = sp.linalg.norm(X[ee, :])
else:
n = np.linalg.norm(X[ee, :])
if np.abs(n - 1.) > 1e-4:
raise ValueError("Data l2-norm must be 1, found {}".format(n))
return X
def _check_test_data(self, X):
X = check_array(X, accept_sparse="csr", dtype=FLOAT_DTYPES, warn_on_dtype=True)
n_samples, n_features = X.shape
expected_n_features = self.cluster_centers_.shape[1]
if not n_features == expected_n_features:
raise ValueError(
"Incorrect number of features. "
"Got %d features, expected %d" % (n_features, expected_n_features)
)
for ee in range(n_samples):
if sp.issparse(X):
n = sp.linalg.norm(X[ee, :])
else:
n = np.linalg.norm(X[ee, :])
if np.abs(n - 1.) > 1e-4:
raise ValueError("Data l2-norm must be 1, found {}".format(n))
return X
def fit_predict(self, X, y=None):
"""Compute cluster centers and predict cluster index for each sample.
Convenience method; equivalent to calling fit(X) followed by
predict(X).
"""
return self.fit(X).labels_
def fit_transform(self, X, y=None):
"""Compute clustering and transform X to cluster-distance space.
Equivalent to fit(X).transform(X), but more efficiently implemented.
"""
# Currently, this just skips a copy of the data if it is not in
# np.array or CSR format already.
# XXX This skips _check_test_data, which may change the dtype;
# we should refactor the input validation.
return self.fit(X)._transform(X)
def transform(self, X, y=None):
"""Transform X to a cluster-distance space.
In the new space, each dimension is the cosine distance to the cluster
centers. Note that even if X is sparse, the array returned by
`transform` will typically be dense.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data to transform.
Returns
-------
X_new : array, shape [n_samples, k]
X transformed in the new space.
"""
if self.normalize:
X = normalize(X)
check_is_fitted(self, "cluster_centers_")
X = self._check_test_data(X)
return self._transform(X)
def _transform(self, X):
"""guts of transform method; no input validation"""
return cosine_distances(X, self.cluster_centers_)
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
In the vector quantization literature, `cluster_centers_` is called
the code book and each value returned by `predict` is the index of
the closest code in the code book.
Note: Does not check that each point is on the sphere.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data to predict.
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
"""
if self.normalize:
X = normalize(X)
check_is_fitted(self, "cluster_centers_")
X = self._check_test_data(X)
return _labels_inertia(X, self.cluster_centers_)[0]
def score(self, X, y=None):
"""Inertia score (sum of all distances to closest cluster).
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data.
Returns
-------
score : float
Larger score is better.
"""
if self.normalize:
X = normalize(X)
check_is_fitted(self, "cluster_centers_")
X = self._check_test_data(X)
return -_labels_inertia(X, self.cluster_centers_)[1]
def log_likelihood(self, X):
check_is_fitted(self, "cluster_centers_")
return _log_likelihood(
X, self.cluster_centers_, self.weights_, self.concentrations_
)
|
jasonlaska/spherecluster | spherecluster/von_mises_fisher_mixture.py | VonMisesFisherMixture.transform | python | def transform(self, X, y=None):
if self.normalize:
X = normalize(X)
check_is_fitted(self, "cluster_centers_")
X = self._check_test_data(X)
return self._transform(X) | Transform X to a cluster-distance space.
In the new space, each dimension is the cosine distance to the cluster
centers. Note that even if X is sparse, the array returned by
`transform` will typically be dense.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data to transform.
Returns
-------
X_new : array, shape [n_samples, k]
X transformed in the new space. | train | https://github.com/jasonlaska/spherecluster/blob/701b0b1909088a56e353b363b2672580d4fe9d93/spherecluster/von_mises_fisher_mixture.py#L869-L890 | null | class VonMisesFisherMixture(BaseEstimator, ClusterMixin, TransformerMixin):
"""Estimator for Mixture of von Mises Fisher clustering on the unit sphere.
Implements the algorithms (i) and (ii) from
"Clustering on the Unit Hypersphere using von Mises-Fisher Distributions"
by Banerjee, Dhillon, Ghosh, and Sra.
TODO: Currently only supports Banerjee et al 2005 approximation of kappa,
however, there are numerous other approximations see _update_params.
Attribution
----------
Approximation of log-vmf distribution function from movMF R-package.
movMF: An R Package for Fitting Mixtures of von Mises-Fisher Distributions
by Kurt Hornik, Bettina Grun, 2014
Find more at:
https://cran.r-project.org/web/packages/movMF/vignettes/movMF.pdf
https://cran.r-project.org/web/packages/movMF/index.html
Basic sklearn scaffolding from sklearn.cluster.KMeans.
Parameters
----------
n_clusters : int, optional, default: 8
The number of clusters to form as well as the number of
centroids to generate.
posterior_type: 'soft' or 'hard'
Type of posterior computed in exepectation step.
See note about attribute: self.posterior_
force_weights : None or array [n_clusters, ]
If None, the algorithm will estimate the weights.
If an array of weights, algorithm will estimate concentrations and
centers with given weights.
max_iter : int, default: 300
Maximum number of iterations of the k-means algorithm for a
single run.
n_init : int, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
init: (string) one of
random-class [default]: random class assignment & centroid computation
k-means++ : uses sklearn k-means++ initialization algorithm
spherical-k-means : use centroids from one pass of spherical k-means
random : random unit norm vectors
random-orthonormal : random orthonormal vectors
If an ndarray is passed, it should be of shape (n_clusters, n_features)
and gives the initial centers.
tol : float, default: 1e-6
Relative tolerance with regards to inertia to declare convergence
n_jobs : int
The number of jobs to use for the computation. This works by computing
each of the n_init runs in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
verbose : int, default 0
Verbosity mode.
copy_x : boolean, default True
When pre-computing distances it is more numerically accurate to center
the data first. If copy_x is True, then the original data is not
modified. If False, the original data is modified, and put back before
the function returns, but small numerical differences may be introduced
by subtracting and then adding the data mean.
normalize : boolean, default True
Normalize the input to have unnit norm.
Attributes
----------
cluster_centers_ : array, [n_clusters, n_features]
Coordinates of cluster centers
labels_ :
Labels of each point
inertia_ : float
Sum of distances of samples to their closest cluster center.
weights_ : array, [n_clusters,]
Weights of each cluster in vMF distribution (alpha).
concentrations_ : array [n_clusters,]
Concentration parameter for each cluster (kappa).
Larger values correspond to more concentrated clusters.
posterior_ : array, [n_clusters, n_examples]
Each column corresponds to the posterio distribution for and example.
If posterior_type='hard' is used, there will only be one non-zero per
column, its index corresponding to the example's cluster label.
If posterior_type='soft' is used, this matrix will be dense and the
column values correspond to soft clustering weights.
"""
def __init__(
self,
n_clusters=5,
posterior_type="soft",
force_weights=None,
n_init=10,
n_jobs=1,
max_iter=300,
verbose=False,
init="random-class",
random_state=None,
tol=1e-6,
copy_x=True,
normalize=True,
):
self.n_clusters = n_clusters
self.posterior_type = posterior_type
self.force_weights = force_weights
self.n_init = n_init
self.n_jobs = n_jobs
self.max_iter = max_iter
self.verbose = verbose
self.init = init
self.random_state = random_state
self.tol = tol
self.copy_x = copy_x
self.normalize = normalize
def _check_force_weights(self):
if self.force_weights is None:
return
if len(self.force_weights) != self.n_clusters:
raise ValueError(
(
"len(force_weights)={} but must equal "
"n_clusters={}".format(len(self.force_weights), self.n_clusters)
)
)
def _check_fit_data(self, X):
"""Verify that the number of samples given is larger than k"""
X = check_array(X, accept_sparse="csr", dtype=[np.float64, np.float32])
n_samples, n_features = X.shape
if X.shape[0] < self.n_clusters:
raise ValueError(
"n_samples=%d should be >= n_clusters=%d"
% (X.shape[0], self.n_clusters)
)
for ee in range(n_samples):
if sp.issparse(X):
n = sp.linalg.norm(X[ee, :])
else:
n = np.linalg.norm(X[ee, :])
if np.abs(n - 1.) > 1e-4:
raise ValueError("Data l2-norm must be 1, found {}".format(n))
return X
def _check_test_data(self, X):
X = check_array(X, accept_sparse="csr", dtype=FLOAT_DTYPES, warn_on_dtype=True)
n_samples, n_features = X.shape
expected_n_features = self.cluster_centers_.shape[1]
if not n_features == expected_n_features:
raise ValueError(
"Incorrect number of features. "
"Got %d features, expected %d" % (n_features, expected_n_features)
)
for ee in range(n_samples):
if sp.issparse(X):
n = sp.linalg.norm(X[ee, :])
else:
n = np.linalg.norm(X[ee, :])
if np.abs(n - 1.) > 1e-4:
raise ValueError("Data l2-norm must be 1, found {}".format(n))
return X
def fit(self, X, y=None):
"""Compute mixture of von Mises Fisher clustering.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
"""
if self.normalize:
X = normalize(X)
self._check_force_weights()
random_state = check_random_state(self.random_state)
X = self._check_fit_data(X)
(
self.cluster_centers_,
self.labels_,
self.inertia_,
self.weights_,
self.concentrations_,
self.posterior_,
) = movMF(
X,
self.n_clusters,
posterior_type=self.posterior_type,
force_weights=self.force_weights,
n_init=self.n_init,
n_jobs=self.n_jobs,
max_iter=self.max_iter,
verbose=self.verbose,
init=self.init,
random_state=random_state,
tol=self.tol,
copy_x=self.copy_x,
)
return self
def fit_predict(self, X, y=None):
"""Compute cluster centers and predict cluster index for each sample.
Convenience method; equivalent to calling fit(X) followed by
predict(X).
"""
return self.fit(X).labels_
def fit_transform(self, X, y=None):
"""Compute clustering and transform X to cluster-distance space.
Equivalent to fit(X).transform(X), but more efficiently implemented.
"""
# Currently, this just skips a copy of the data if it is not in
# np.array or CSR format already.
# XXX This skips _check_test_data, which may change the dtype;
# we should refactor the input validation.
return self.fit(X)._transform(X)
def _transform(self, X):
"""guts of transform method; no input validation"""
return cosine_distances(X, self.cluster_centers_)
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
In the vector quantization literature, `cluster_centers_` is called
the code book and each value returned by `predict` is the index of
the closest code in the code book.
Note: Does not check that each point is on the sphere.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data to predict.
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
"""
if self.normalize:
X = normalize(X)
check_is_fitted(self, "cluster_centers_")
X = self._check_test_data(X)
return _labels_inertia(X, self.cluster_centers_)[0]
def score(self, X, y=None):
"""Inertia score (sum of all distances to closest cluster).
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data.
Returns
-------
score : float
Larger score is better.
"""
if self.normalize:
X = normalize(X)
check_is_fitted(self, "cluster_centers_")
X = self._check_test_data(X)
return -_labels_inertia(X, self.cluster_centers_)[1]
def log_likelihood(self, X):
check_is_fitted(self, "cluster_centers_")
return _log_likelihood(
X, self.cluster_centers_, self.weights_, self.concentrations_
)
|
jasonlaska/spherecluster | spherecluster/von_mises_fisher_mixture.py | VonMisesFisherMixture.predict | python | def predict(self, X):
if self.normalize:
X = normalize(X)
check_is_fitted(self, "cluster_centers_")
X = self._check_test_data(X)
return _labels_inertia(X, self.cluster_centers_)[0] | Predict the closest cluster each sample in X belongs to.
In the vector quantization literature, `cluster_centers_` is called
the code book and each value returned by `predict` is the index of
the closest code in the code book.
Note: Does not check that each point is on the sphere.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data to predict.
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to. | train | https://github.com/jasonlaska/spherecluster/blob/701b0b1909088a56e353b363b2672580d4fe9d93/spherecluster/von_mises_fisher_mixture.py#L896-L920 | [
"def _labels_inertia(X, centers):\n \"\"\"Compute labels and inertia with cosine distance.\n \"\"\"\n n_examples, n_features = X.shape\n n_clusters, n_features = centers.shape\n\n labels = np.zeros((n_examples,))\n inertia = np.zeros((n_examples,))\n\n for ee in range(n_examples):\n dist... | class VonMisesFisherMixture(BaseEstimator, ClusterMixin, TransformerMixin):
"""Estimator for Mixture of von Mises Fisher clustering on the unit sphere.
Implements the algorithms (i) and (ii) from
"Clustering on the Unit Hypersphere using von Mises-Fisher Distributions"
by Banerjee, Dhillon, Ghosh, and Sra.
TODO: Currently only supports Banerjee et al 2005 approximation of kappa,
however, there are numerous other approximations see _update_params.
Attribution
----------
Approximation of log-vmf distribution function from movMF R-package.
movMF: An R Package for Fitting Mixtures of von Mises-Fisher Distributions
by Kurt Hornik, Bettina Grun, 2014
Find more at:
https://cran.r-project.org/web/packages/movMF/vignettes/movMF.pdf
https://cran.r-project.org/web/packages/movMF/index.html
Basic sklearn scaffolding from sklearn.cluster.KMeans.
Parameters
----------
n_clusters : int, optional, default: 8
The number of clusters to form as well as the number of
centroids to generate.
posterior_type: 'soft' or 'hard'
Type of posterior computed in exepectation step.
See note about attribute: self.posterior_
force_weights : None or array [n_clusters, ]
If None, the algorithm will estimate the weights.
If an array of weights, algorithm will estimate concentrations and
centers with given weights.
max_iter : int, default: 300
Maximum number of iterations of the k-means algorithm for a
single run.
n_init : int, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
init: (string) one of
random-class [default]: random class assignment & centroid computation
k-means++ : uses sklearn k-means++ initialization algorithm
spherical-k-means : use centroids from one pass of spherical k-means
random : random unit norm vectors
random-orthonormal : random orthonormal vectors
If an ndarray is passed, it should be of shape (n_clusters, n_features)
and gives the initial centers.
tol : float, default: 1e-6
Relative tolerance with regards to inertia to declare convergence
n_jobs : int
The number of jobs to use for the computation. This works by computing
each of the n_init runs in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
verbose : int, default 0
Verbosity mode.
copy_x : boolean, default True
When pre-computing distances it is more numerically accurate to center
the data first. If copy_x is True, then the original data is not
modified. If False, the original data is modified, and put back before
the function returns, but small numerical differences may be introduced
by subtracting and then adding the data mean.
normalize : boolean, default True
Normalize the input to have unnit norm.
Attributes
----------
cluster_centers_ : array, [n_clusters, n_features]
Coordinates of cluster centers
labels_ :
Labels of each point
inertia_ : float
Sum of distances of samples to their closest cluster center.
weights_ : array, [n_clusters,]
Weights of each cluster in vMF distribution (alpha).
concentrations_ : array [n_clusters,]
Concentration parameter for each cluster (kappa).
Larger values correspond to more concentrated clusters.
posterior_ : array, [n_clusters, n_examples]
Each column corresponds to the posterio distribution for and example.
If posterior_type='hard' is used, there will only be one non-zero per
column, its index corresponding to the example's cluster label.
If posterior_type='soft' is used, this matrix will be dense and the
column values correspond to soft clustering weights.
"""
def __init__(
self,
n_clusters=5,
posterior_type="soft",
force_weights=None,
n_init=10,
n_jobs=1,
max_iter=300,
verbose=False,
init="random-class",
random_state=None,
tol=1e-6,
copy_x=True,
normalize=True,
):
self.n_clusters = n_clusters
self.posterior_type = posterior_type
self.force_weights = force_weights
self.n_init = n_init
self.n_jobs = n_jobs
self.max_iter = max_iter
self.verbose = verbose
self.init = init
self.random_state = random_state
self.tol = tol
self.copy_x = copy_x
self.normalize = normalize
def _check_force_weights(self):
if self.force_weights is None:
return
if len(self.force_weights) != self.n_clusters:
raise ValueError(
(
"len(force_weights)={} but must equal "
"n_clusters={}".format(len(self.force_weights), self.n_clusters)
)
)
def _check_fit_data(self, X):
"""Verify that the number of samples given is larger than k"""
X = check_array(X, accept_sparse="csr", dtype=[np.float64, np.float32])
n_samples, n_features = X.shape
if X.shape[0] < self.n_clusters:
raise ValueError(
"n_samples=%d should be >= n_clusters=%d"
% (X.shape[0], self.n_clusters)
)
for ee in range(n_samples):
if sp.issparse(X):
n = sp.linalg.norm(X[ee, :])
else:
n = np.linalg.norm(X[ee, :])
if np.abs(n - 1.) > 1e-4:
raise ValueError("Data l2-norm must be 1, found {}".format(n))
return X
def _check_test_data(self, X):
X = check_array(X, accept_sparse="csr", dtype=FLOAT_DTYPES, warn_on_dtype=True)
n_samples, n_features = X.shape
expected_n_features = self.cluster_centers_.shape[1]
if not n_features == expected_n_features:
raise ValueError(
"Incorrect number of features. "
"Got %d features, expected %d" % (n_features, expected_n_features)
)
for ee in range(n_samples):
if sp.issparse(X):
n = sp.linalg.norm(X[ee, :])
else:
n = np.linalg.norm(X[ee, :])
if np.abs(n - 1.) > 1e-4:
raise ValueError("Data l2-norm must be 1, found {}".format(n))
return X
def fit(self, X, y=None):
"""Compute mixture of von Mises Fisher clustering.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
"""
if self.normalize:
X = normalize(X)
self._check_force_weights()
random_state = check_random_state(self.random_state)
X = self._check_fit_data(X)
(
self.cluster_centers_,
self.labels_,
self.inertia_,
self.weights_,
self.concentrations_,
self.posterior_,
) = movMF(
X,
self.n_clusters,
posterior_type=self.posterior_type,
force_weights=self.force_weights,
n_init=self.n_init,
n_jobs=self.n_jobs,
max_iter=self.max_iter,
verbose=self.verbose,
init=self.init,
random_state=random_state,
tol=self.tol,
copy_x=self.copy_x,
)
return self
def fit_predict(self, X, y=None):
"""Compute cluster centers and predict cluster index for each sample.
Convenience method; equivalent to calling fit(X) followed by
predict(X).
"""
return self.fit(X).labels_
def fit_transform(self, X, y=None):
"""Compute clustering and transform X to cluster-distance space.
Equivalent to fit(X).transform(X), but more efficiently implemented.
"""
# Currently, this just skips a copy of the data if it is not in
# np.array or CSR format already.
# XXX This skips _check_test_data, which may change the dtype;
# we should refactor the input validation.
return self.fit(X)._transform(X)
def transform(self, X, y=None):
"""Transform X to a cluster-distance space.
In the new space, each dimension is the cosine distance to the cluster
centers. Note that even if X is sparse, the array returned by
`transform` will typically be dense.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data to transform.
Returns
-------
X_new : array, shape [n_samples, k]
X transformed in the new space.
"""
if self.normalize:
X = normalize(X)
check_is_fitted(self, "cluster_centers_")
X = self._check_test_data(X)
return self._transform(X)
def _transform(self, X):
"""guts of transform method; no input validation"""
return cosine_distances(X, self.cluster_centers_)
def score(self, X, y=None):
"""Inertia score (sum of all distances to closest cluster).
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data.
Returns
-------
score : float
Larger score is better.
"""
if self.normalize:
X = normalize(X)
check_is_fitted(self, "cluster_centers_")
X = self._check_test_data(X)
return -_labels_inertia(X, self.cluster_centers_)[1]
def log_likelihood(self, X):
check_is_fitted(self, "cluster_centers_")
return _log_likelihood(
X, self.cluster_centers_, self.weights_, self.concentrations_
)
|
jasonlaska/spherecluster | spherecluster/von_mises_fisher_mixture.py | VonMisesFisherMixture.score | python | def score(self, X, y=None):
if self.normalize:
X = normalize(X)
check_is_fitted(self, "cluster_centers_")
X = self._check_test_data(X)
return -_labels_inertia(X, self.cluster_centers_)[1] | Inertia score (sum of all distances to closest cluster).
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data.
Returns
-------
score : float
Larger score is better. | train | https://github.com/jasonlaska/spherecluster/blob/701b0b1909088a56e353b363b2672580d4fe9d93/spherecluster/von_mises_fisher_mixture.py#L922-L940 | [
"def _labels_inertia(X, centers):\n \"\"\"Compute labels and inertia with cosine distance.\n \"\"\"\n n_examples, n_features = X.shape\n n_clusters, n_features = centers.shape\n\n labels = np.zeros((n_examples,))\n inertia = np.zeros((n_examples,))\n\n for ee in range(n_examples):\n dist... | class VonMisesFisherMixture(BaseEstimator, ClusterMixin, TransformerMixin):
"""Estimator for Mixture of von Mises Fisher clustering on the unit sphere.
Implements the algorithms (i) and (ii) from
"Clustering on the Unit Hypersphere using von Mises-Fisher Distributions"
by Banerjee, Dhillon, Ghosh, and Sra.
TODO: Currently only supports Banerjee et al 2005 approximation of kappa,
however, there are numerous other approximations see _update_params.
Attribution
----------
Approximation of log-vmf distribution function from movMF R-package.
movMF: An R Package for Fitting Mixtures of von Mises-Fisher Distributions
by Kurt Hornik, Bettina Grun, 2014
Find more at:
https://cran.r-project.org/web/packages/movMF/vignettes/movMF.pdf
https://cran.r-project.org/web/packages/movMF/index.html
Basic sklearn scaffolding from sklearn.cluster.KMeans.
Parameters
----------
n_clusters : int, optional, default: 8
The number of clusters to form as well as the number of
centroids to generate.
posterior_type: 'soft' or 'hard'
Type of posterior computed in exepectation step.
See note about attribute: self.posterior_
force_weights : None or array [n_clusters, ]
If None, the algorithm will estimate the weights.
If an array of weights, algorithm will estimate concentrations and
centers with given weights.
max_iter : int, default: 300
Maximum number of iterations of the k-means algorithm for a
single run.
n_init : int, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
init: (string) one of
random-class [default]: random class assignment & centroid computation
k-means++ : uses sklearn k-means++ initialization algorithm
spherical-k-means : use centroids from one pass of spherical k-means
random : random unit norm vectors
random-orthonormal : random orthonormal vectors
If an ndarray is passed, it should be of shape (n_clusters, n_features)
and gives the initial centers.
tol : float, default: 1e-6
Relative tolerance with regards to inertia to declare convergence
n_jobs : int
The number of jobs to use for the computation. This works by computing
each of the n_init runs in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
verbose : int, default 0
Verbosity mode.
copy_x : boolean, default True
When pre-computing distances it is more numerically accurate to center
the data first. If copy_x is True, then the original data is not
modified. If False, the original data is modified, and put back before
the function returns, but small numerical differences may be introduced
by subtracting and then adding the data mean.
normalize : boolean, default True
Normalize the input to have unnit norm.
Attributes
----------
cluster_centers_ : array, [n_clusters, n_features]
Coordinates of cluster centers
labels_ :
Labels of each point
inertia_ : float
Sum of distances of samples to their closest cluster center.
weights_ : array, [n_clusters,]
Weights of each cluster in vMF distribution (alpha).
concentrations_ : array [n_clusters,]
Concentration parameter for each cluster (kappa).
Larger values correspond to more concentrated clusters.
posterior_ : array, [n_clusters, n_examples]
Each column corresponds to the posterio distribution for and example.
If posterior_type='hard' is used, there will only be one non-zero per
column, its index corresponding to the example's cluster label.
If posterior_type='soft' is used, this matrix will be dense and the
column values correspond to soft clustering weights.
"""
def __init__(
self,
n_clusters=5,
posterior_type="soft",
force_weights=None,
n_init=10,
n_jobs=1,
max_iter=300,
verbose=False,
init="random-class",
random_state=None,
tol=1e-6,
copy_x=True,
normalize=True,
):
self.n_clusters = n_clusters
self.posterior_type = posterior_type
self.force_weights = force_weights
self.n_init = n_init
self.n_jobs = n_jobs
self.max_iter = max_iter
self.verbose = verbose
self.init = init
self.random_state = random_state
self.tol = tol
self.copy_x = copy_x
self.normalize = normalize
def _check_force_weights(self):
if self.force_weights is None:
return
if len(self.force_weights) != self.n_clusters:
raise ValueError(
(
"len(force_weights)={} but must equal "
"n_clusters={}".format(len(self.force_weights), self.n_clusters)
)
)
def _check_fit_data(self, X):
"""Verify that the number of samples given is larger than k"""
X = check_array(X, accept_sparse="csr", dtype=[np.float64, np.float32])
n_samples, n_features = X.shape
if X.shape[0] < self.n_clusters:
raise ValueError(
"n_samples=%d should be >= n_clusters=%d"
% (X.shape[0], self.n_clusters)
)
for ee in range(n_samples):
if sp.issparse(X):
n = sp.linalg.norm(X[ee, :])
else:
n = np.linalg.norm(X[ee, :])
if np.abs(n - 1.) > 1e-4:
raise ValueError("Data l2-norm must be 1, found {}".format(n))
return X
def _check_test_data(self, X):
X = check_array(X, accept_sparse="csr", dtype=FLOAT_DTYPES, warn_on_dtype=True)
n_samples, n_features = X.shape
expected_n_features = self.cluster_centers_.shape[1]
if not n_features == expected_n_features:
raise ValueError(
"Incorrect number of features. "
"Got %d features, expected %d" % (n_features, expected_n_features)
)
for ee in range(n_samples):
if sp.issparse(X):
n = sp.linalg.norm(X[ee, :])
else:
n = np.linalg.norm(X[ee, :])
if np.abs(n - 1.) > 1e-4:
raise ValueError("Data l2-norm must be 1, found {}".format(n))
return X
def fit(self, X, y=None):
"""Compute mixture of von Mises Fisher clustering.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
"""
if self.normalize:
X = normalize(X)
self._check_force_weights()
random_state = check_random_state(self.random_state)
X = self._check_fit_data(X)
(
self.cluster_centers_,
self.labels_,
self.inertia_,
self.weights_,
self.concentrations_,
self.posterior_,
) = movMF(
X,
self.n_clusters,
posterior_type=self.posterior_type,
force_weights=self.force_weights,
n_init=self.n_init,
n_jobs=self.n_jobs,
max_iter=self.max_iter,
verbose=self.verbose,
init=self.init,
random_state=random_state,
tol=self.tol,
copy_x=self.copy_x,
)
return self
def fit_predict(self, X, y=None):
"""Compute cluster centers and predict cluster index for each sample.
Convenience method; equivalent to calling fit(X) followed by
predict(X).
"""
return self.fit(X).labels_
def fit_transform(self, X, y=None):
"""Compute clustering and transform X to cluster-distance space.
Equivalent to fit(X).transform(X), but more efficiently implemented.
"""
# Currently, this just skips a copy of the data if it is not in
# np.array or CSR format already.
# XXX This skips _check_test_data, which may change the dtype;
# we should refactor the input validation.
return self.fit(X)._transform(X)
def transform(self, X, y=None):
"""Transform X to a cluster-distance space.
In the new space, each dimension is the cosine distance to the cluster
centers. Note that even if X is sparse, the array returned by
`transform` will typically be dense.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data to transform.
Returns
-------
X_new : array, shape [n_samples, k]
X transformed in the new space.
"""
if self.normalize:
X = normalize(X)
check_is_fitted(self, "cluster_centers_")
X = self._check_test_data(X)
return self._transform(X)
def _transform(self, X):
"""guts of transform method; no input validation"""
return cosine_distances(X, self.cluster_centers_)
def predict(self, X):
    """Predict the closest cluster each sample in X belongs to.

    In the vector quantization literature, `cluster_centers_` is called
    the code book and each value returned by `predict` is the index of
    the closest code in the code book.

    Note: Does not check that each point is on the sphere.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape = [n_samples, n_features]
        New data to predict.

    Returns
    -------
    labels : array, shape [n_samples,]
        Index of the cluster each sample belongs to.
    """
    # optionally l2-normalize rows, mirroring what fit() did
    if self.normalize:
        X = normalize(X)
    # raises NotFittedError if fit() has not been called yet
    check_is_fitted(self, "cluster_centers_")
    X = self._check_test_data(X)
    # _labels_inertia returns (labels, inertia); only labels are needed here
    return _labels_inertia(X, self.cluster_centers_)[0]
def log_likelihood(self, X):
    """Delegate to the module-level _log_likelihood with the fitted
    centers, mixture weights and concentrations.

    Raises NotFittedError when called before fit().
    """
    check_is_fitted(self, "cluster_centers_")
    return _log_likelihood(
        X, self.cluster_centers_, self.weights_, self.concentrations_
    )
|
brandonxiang/geojson-python-utils | geojson_utils/geojson_utils.py | linestrings_intersect | python | def linestrings_intersect(line1, line2):
intersects = []
for i in range(0, len(line1['coordinates']) - 1):
for j in range(0, len(line2['coordinates']) - 1):
a1_x = line1['coordinates'][i][1]
a1_y = line1['coordinates'][i][0]
a2_x = line1['coordinates'][i + 1][1]
a2_y = line1['coordinates'][i + 1][0]
b1_x = line2['coordinates'][j][1]
b1_y = line2['coordinates'][j][0]
b2_x = line2['coordinates'][j + 1][1]
b2_y = line2['coordinates'][j + 1][0]
ua_t = (b2_x - b1_x) * (a1_y - b1_y) - \
(b2_y - b1_y) * (a1_x - b1_x)
ub_t = (a2_x - a1_x) * (a1_y - b1_y) - \
(a2_y - a1_y) * (a1_x - b1_x)
u_b = (b2_y - b1_y) * (a2_x - a1_x) - (b2_x - b1_x) * (a2_y - a1_y)
if not u_b == 0:
u_a = ua_t / u_b
u_b = ub_t / u_b
if 0 <= u_a and u_a <= 1 and 0 <= u_b and u_b <= 1:
intersects.append({'type': 'Point', 'coordinates': [
a1_x + u_a * (a2_x - a1_x), a1_y + u_a * (a2_y - a1_y)]})
# if len(intersects) == 0:
# intersects = False
return intersects | To valid whether linestrings from geojson are intersected with each other.
reference: http://www.kevlindev.com/gui/math/intersection/Intersection.js
Keyword arguments:
line1 -- first line geojson object
line2 -- second line geojson object
if(line1 intersects with other) return intersect point array else empty array | train | https://github.com/brandonxiang/geojson-python-utils/blob/33d0dcd5f16e0567b48c0d49fd292a4f1db16b41/geojson_utils/geojson_utils.py#L4-L39 | null | import math
def _bbox_around_polycoords(coords):
"""
bounding box
"""
x_all = []
y_all = []
for first in coords[0]:
x_all.append(first[1])
y_all.append(first[0])
return [min(x_all), min(y_all), max(x_all), max(y_all)]
def _point_in_bbox(point, bounds):
"""
valid whether the point is inside the bounding box
"""
return not(point['coordinates'][1] < bounds[0] or point['coordinates'][1] > bounds[2]
or point['coordinates'][0] < bounds[1] or point['coordinates'][0] > bounds[3])
def _pnpoly(x, y, coords):
"""
the algorithm to judge whether the point is located in polygon
reference: https://www.ecse.rpi.edu/~wrf/Research/Short_Notes/pnpoly.html#Explanation
"""
vert = [[0, 0]]
for coord in coords:
for node in coord:
vert.append(node)
vert.append(coord[0])
vert.append([0, 0])
inside = False
i = 0
j = len(vert) - 1
while i < len(vert):
if ((vert[i][0] > y) != (vert[j][0] > y)) and (x < (vert[j][1] - vert[i][1])
* (y - vert[i][0]) / (vert[j][0] - vert[i][0]) + vert[i][1]):
inside = not inside
j = i
i += 1
return inside
def _point_in_polygon(point, coords):
inside_box = False
for coord in coords:
if inside_box:
break
if _point_in_bbox(point, _bbox_around_polycoords(coord)):
inside_box = True
if not inside_box:
return False
inside_poly = False
for coord in coords:
if inside_poly:
break
if _pnpoly(point['coordinates'][1], point['coordinates'][0], coord):
inside_poly = True
return inside_poly
def point_in_polygon(point, poly):
"""
valid whether the point is located in a polygon
Keyword arguments:
point -- point geojson object
poly -- polygon geojson object
if(point inside poly) return true else false
"""
coords = [poly['coordinates']] if poly[
'type'] == 'Polygon' else poly['coordinates']
return _point_in_polygon(point, coords)
def point_in_multipolygon(point, multipoly):
"""
valid whether the point is located in a mulitpolygon (donut polygon is not supported)
Keyword arguments:
point -- point geojson object
multipoly -- multipolygon geojson object
if(point inside multipoly) return true else false
"""
coords_array = [multipoly['coordinates']] if multipoly[
'type'] == "MultiPolygon" else multipoly['coordinates']
for coords in coords_array:
if _point_in_polygon(point, coords):
return True
return False
def number2radius(number):
"""
convert degree into radius
Keyword arguments:
number -- degree
return radius
"""
return number * math.pi / 180
def number2degree(number):
"""
convert radius into degree
Keyword arguments:
number -- radius
return degree
"""
return number * 180 / math.pi
def draw_circle(radius_in_meters, center_point, steps=15):
"""
get a circle shape polygon based on centerPoint and radius
Keyword arguments:
point1 -- point one geojson object
point2 -- point two geojson object
if(point inside multipoly) return true else false
"""
steps = steps if steps > 15 else 15
center = [center_point['coordinates'][1], center_point['coordinates'][0]]
dist = (radius_in_meters / 1000) / 6371
# convert meters to radiant
rad_center = [number2radius(center[0]), number2radius(center[1])]
# 15 sided circle
poly = []
for step in range(0, steps):
brng = 2 * math.pi * step / steps
lat = math.asin(math.sin(rad_center[0]) * math.cos(dist) +
math.cos(rad_center[0]) * math.sin(dist) * math.cos(brng))
lng = rad_center[1] + math.atan2(math.sin(brng) * math.sin(dist)
* math.cos(rad_center[0]), math.cos(dist) - math.sin(rad_center[0]) * math.sin(lat))
poly.append([number2degree(lng), number2degree(lat)])
return {"type": "Polygon", "coordinates": [poly]}
def rectangle_centroid(rectangle):
"""
get the centroid of the rectangle
Keyword arguments:
rectangle -- polygon geojson object
return centroid
"""
bbox = rectangle['coordinates'][0]
xmin = bbox[0][0]
ymin = bbox[0][1]
xmax = bbox[2][0]
ymax = bbox[2][1]
xwidth = xmax - xmin
ywidth = ymax - ymin
return {'type': 'Point', 'coordinates': [xmin + xwidth / 2, ymin + ywidth / 2]}
def point_distance(point1, point2):
"""
calculate the distance between two points on the sphere like google map
reference http://www.movable-type.co.uk/scripts/latlong.html
Keyword arguments:
point1 -- point one geojson object
point2 -- point two geojson object
return distance
"""
lon1 = point1['coordinates'][0]
lat1 = point1['coordinates'][1]
lon2 = point2['coordinates'][0]
lat2 = point2['coordinates'][1]
deg_lat = number2radius(lat2 - lat1)
deg_lon = number2radius(lon2 - lon1)
a = math.pow(math.sin(deg_lat / 2), 2) + math.cos(number2radius(lat1)) * \
math.cos(number2radius(lat2)) * math.pow(math.sin(deg_lon / 2), 2)
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
return (6371 * c) * 1000
def point_distance_ellipsode(point1, point2):
    """
    calculate the distance between two points on the ellipsoid,
    with radii of curvature evaluated at point1

    Keyword arguments:
    point1 -- point one geojson object
    point2 -- point two geojson object

    return distance in meters
    """
    def _deg2rad(deg):
        # same arithmetic as the module-level number2radius helper
        return deg * math.pi / 180

    # ellipsoid parameters: semi-major axis, flattening, eccentricity
    a = 6378137
    f = 1 / 298.25722
    b = a - a * f
    e = math.sqrt((a * a - b * b) / (a * a))
    lon1 = point1['coordinates'][0]
    lat1 = point1['coordinates'][1]
    # BUG FIX: lon2 previously read point1's longitude, so the east-west
    # component of the distance was silently always zero.
    lon2 = point2['coordinates'][0]
    lat2 = point2['coordinates'][1]
    # meridian (M) and prime-vertical (N) radii of curvature at lat1
    M = a * (1 - e * e) * math.pow(1 - math.pow(e * math.sin(_deg2rad(lat1)), 2), -1.5)
    N = a / (math.pow(1 - math.pow(e * math.sin(_deg2rad(lat1)), 2), 0.5))
    distance_lat = M * _deg2rad(lat2 - lat1)
    distance_lon = N * math.cos(_deg2rad(lat1)) * (lon2 - lon1) * 3600 * math.sin(1 / 3600 * math.pi / 180)
    return math.sqrt(distance_lat * distance_lat + distance_lon * distance_lon)
def geometry_within_radius(geometry, center, radius):
"""
To valid whether point or linestring or polygon is inside a radius around a center
Keyword arguments:
geometry -- point/linstring/polygon geojson object
center -- point geojson object
radius -- radius
if(geometry inside radius) return true else false
"""
if geometry['type'] == 'Point':
return point_distance(geometry, center) <= radius
elif geometry['type'] == 'LineString' or geometry['type'] == 'Polygon':
point = {}
# it's enough to check the exterior ring of the Polygon
coordinates = geometry['coordinates'][0] if geometry['type'] == 'Polygon' else geometry['coordinates']
for coordinate in coordinates:
point['coordinates'] = coordinate
if point_distance(point, center) > radius:
return False
return True
def area(poly):
"""
calculate the area of polygon
Keyword arguments:
poly -- polygon geojson object
return polygon area
"""
poly_area = 0
# TODO: polygon holes at coordinates[1]
points = poly['coordinates'][0]
j = len(points) - 1
count = len(points)
for i in range(0, count):
p1_x = points[i][1]
p1_y = points[i][0]
p2_x = points[j][1]
p2_y = points[j][0]
poly_area += p1_x * p2_y
poly_area -= p1_y * p2_x
j = i
poly_area /= 2
return poly_area
def centroid(poly):
"""
get the centroid of polygon
adapted from http://paulbourke.net/geometry/polyarea/javascript.txt
Keyword arguments:
poly -- polygon geojson object
return polygon centroid
"""
f_total = 0
x_total = 0
y_total = 0
# TODO: polygon holes at coordinates[1]
points = poly['coordinates'][0]
j = len(points) - 1
count = len(points)
for i in range(0, count):
p1_x = points[i][1]
p1_y = points[i][0]
p2_x = points[j][1]
p2_y = points[j][0]
f_total = p1_x * p2_y - p2_x * p1_y
x_total += (p1_x + p2_x) * f_total
y_total += (p1_y + p2_y) * f_total
j = i
six_area = area(poly) * 6
return {'type': 'Point', 'coordinates': [y_total / six_area, x_total / six_area]}
def destination_point(point, brng, dist):
"""
Calculate a destination Point base on a base point and a distance
Keyword arguments:
pt -- polygon geojson object
brng -- an angle in degrees
dist -- distance in Kilometer between destination and base point
return destination point object
"""
dist = float(dist) / 6371 # convert dist to angular distance in radians
brng = number2radius(brng)
lon1 = number2radius(point['coordinates'][0])
lat1 = number2radius(point['coordinates'][1])
lat2 = math.asin(math.sin(lat1) * math.cos(dist) +
math.cos(lat1) * math.sin(dist) * math.cos(brng))
lon2 = lon1 + math.atan2(math.sin(brng) * math.sin(dist) *
math.cos(lat1), math.cos(dist) - math.sin(lat1) * math.sin(lat2))
lon2 = (lon2 + 3 * math.pi) % (2 * math.pi) - math.pi # normalise to -180 degree +180 degree
return {'type': 'Point', 'coordinates': [number2degree(lon2), number2degree(lat2)]}
def simplify(source, kink=20):
    """
    Douglas-Peucker style simplification of a sequence of points.

    Keyword arguments:
    source -- sequence of Point geojson objects
    kink -- depth threshold in metres; kinks deeper than this are kept
            (kink depth is the height of the triangle abc where a-b and
            b-c are two consecutive line segments)

    Returns a list of Point geojson objects for the kept vertices.
    Inputs with fewer than three points are returned unsimplified as a
    list of {"lng": ..., "lat": ...} dicts (the original's short-circuit
    behaviour).
    """
    # NOTE(fix): the original used attribute access on dicts
    # (o.coordinates / o.lng), assigned into empty lists by index, read
    # `source[...]` where it meant `source_coord[...]`, and called len()
    # on a Python 3 map object -- all of which raise at runtime.  This
    # version keeps the same algorithm with working list handling.
    source_coord = [{"lng": pt["coordinates"][0], "lat": pt["coordinates"][1]}
                    for pt in source]
    count = len(source_coord)
    if count < 3:
        return source_coord  # one or two points: nothing to simplify

    # half-degree->radian factor used to shrink lng deltas by cos(avg lat)
    lat_scale = (math.pi / 180.0) * 0.5
    index = []  # indexes of source points to include in the reduced line
    # threshold converted from metres to squared degrees
    band_sqr = kink * 360.0 / (2.0 * math.pi * 6378137.0)
    band_sqr *= band_sqr
    # explicit work stack of (start, end) sections instead of the
    # original pair of parallel index arrays
    stack = [(0, count - 1)]
    while stack:
        start, end = stack.pop()
        if (end - start) > 1:
            # find the most deviant intermediate point between start and end
            x12 = source_coord[end]["lng"] - source_coord[start]["lng"]
            y12 = source_coord[end]["lat"] - source_coord[start]["lat"]
            if math.fabs(x12) > 180.0:
                x12 = 360.0 - math.fabs(x12)
            x12 *= math.cos(lat_scale * (source_coord[end]["lat"] + source_coord[start]["lat"]))
            d12 = (x12 * x12) + (y12 * y12)
            sig = start
            max_dev_sqr = -1.0
            for i in range(start + 1, end):
                x13 = source_coord[i]["lng"] - source_coord[start]["lng"]
                y13 = source_coord[i]["lat"] - source_coord[start]["lat"]
                if math.fabs(x13) > 180.0:
                    x13 = 360.0 - math.fabs(x13)
                x13 *= math.cos(lat_scale * (source_coord[i]["lat"] + source_coord[start]["lat"]))
                d13 = (x13 * x13) + (y13 * y13)
                x23 = source_coord[i]["lng"] - source_coord[end]["lng"]
                y23 = source_coord[i]["lat"] - source_coord[end]["lat"]
                if math.fabs(x23) > 180.0:
                    x23 = 360.0 - math.fabs(x23)
                x23 *= math.cos(lat_scale * (source_coord[i]["lat"] + source_coord[end]["lat"]))
                d23 = (x23 * x23) + (y23 * y23)
                if d13 >= (d12 + d23):
                    dev_sqr = d23
                elif d23 >= (d12 + d13):
                    dev_sqr = d13
                else:
                    # perpendicular deviation via the triangle cross product
                    dev_sqr = (x13 * y12 - y13 * x12) * (x13 * y12 - y13 * x12) / d12
                if dev_sqr > max_dev_sqr:
                    sig = i
                    max_dev_sqr = dev_sqr
            if max_dev_sqr < band_sqr:
                # no significant intermediate point: keep the start point
                index.append(start)
            else:
                # split at the deviant point; push (start, sig) last so it
                # is processed first and output order is preserved
                stack.append((sig, end))
                stack.append((start, sig))
        else:
            # no intermediate points, so transfer current start point
            index.append(start)
    # always keep the last point
    index.append(count - 1)
    return [{"type": "Point",
             "coordinates": [source_coord[i]["lng"], source_coord[i]["lat"]]}
            for i in index]
|
brandonxiang/geojson-python-utils | geojson_utils/geojson_utils.py | _bbox_around_polycoords | python | def _bbox_around_polycoords(coords):
x_all = []
y_all = []
for first in coords[0]:
x_all.append(first[1])
y_all.append(first[0])
return [min(x_all), min(y_all), max(x_all), max(y_all)] | bounding box | train | https://github.com/brandonxiang/geojson-python-utils/blob/33d0dcd5f16e0567b48c0d49fd292a4f1db16b41/geojson_utils/geojson_utils.py#L42-L53 | null | import math
def linestrings_intersect(line1, line2):
    """
    Find every crossing point between two GeoJSON LineStrings.

    Adapted from the segment-intersection math at
    http://www.kevlindev.com/gui/math/intersection/Intersection.js

    Keyword arguments:
    line1 -- first line geojson object
    line2 -- second line geojson object

    Returns a list of Point geojson objects, empty when the lines
    never intersect.
    """
    crossings = []
    coords_a = line1['coordinates']
    coords_b = line2['coordinates']
    for seg_a in range(len(coords_a) - 1):
        # NOTE: position index [1] is treated as x and [0] as y throughout
        ax1, ay1 = coords_a[seg_a][1], coords_a[seg_a][0]
        ax2, ay2 = coords_a[seg_a + 1][1], coords_a[seg_a + 1][0]
        for seg_b in range(len(coords_b) - 1):
            bx1, by1 = coords_b[seg_b][1], coords_b[seg_b][0]
            bx2, by2 = coords_b[seg_b + 1][1], coords_b[seg_b + 1][0]
            ua_t = (bx2 - bx1) * (ay1 - by1) - (by2 - by1) * (ax1 - bx1)
            ub_t = (ax2 - ax1) * (ay1 - by1) - (ay2 - ay1) * (ax1 - bx1)
            denom = (by2 - by1) * (ax2 - ax1) - (bx2 - bx1) * (ay2 - ay1)
            if denom == 0:
                # parallel or degenerate segments: no single crossing point
                continue
            u_a = ua_t / denom
            u_b = ub_t / denom
            if 0 <= u_a <= 1 and 0 <= u_b <= 1:
                crossings.append({
                    'type': 'Point',
                    'coordinates': [ax1 + u_a * (ax2 - ax1),
                                    ay1 + u_a * (ay2 - ay1)],
                })
    return crossings
def _point_in_bbox(point, bounds):
"""
valid whether the point is inside the bounding box
"""
return not(point['coordinates'][1] < bounds[0] or point['coordinates'][1] > bounds[2]
or point['coordinates'][0] < bounds[1] or point['coordinates'][0] > bounds[3])
def _pnpoly(x, y, coords):
    """
    Ray-casting test: is the point (x, y) inside the polygon *coords*?

    reference: https://www.ecse.rpi.edu/~wrf/Research/Short_Notes/pnpoly.html#Explanation
    """
    # Flatten every ring into one vertex list, bracketing each ring with
    # [0, 0] sentinel vertices and closing it by repeating its first vertex.
    vert = [[0, 0]]
    for coord in coords:
        for node in coord:
            vert.append(node)
        vert.append(coord[0])
        vert.append([0, 0])
    inside = False
    i = 0
    j = len(vert) - 1
    # Classic pnpoly loop: toggle *inside* every time the horizontal ray
    # from (x, y) crosses the edge (vert[j], vert[i]).  Vertex index [0]
    # is compared against y and index [1] against x (callers pass
    # x=coordinates[1], y=coordinates[0]).
    while i < len(vert):
        if ((vert[i][0] > y) != (vert[j][0] > y)) and (x < (vert[j][1] - vert[i][1])
                * (y - vert[i][0]) / (vert[j][0] - vert[i][0]) + vert[i][1]):
            inside = not inside
        j = i
        i += 1
    return inside
def _point_in_polygon(point, coords):
    """
    Exact point-in-polygon test over a list of polygon coordinate arrays.
    """
    # Cheap rejection first: the point must land in at least one ring's
    # bounding box before the exact test is worth running.
    if not any(_point_in_bbox(point, _bbox_around_polycoords(coord))
               for coord in coords):
        return False
    # Exact test: inside as soon as any ring's pnpoly check succeeds.
    return any(_pnpoly(point['coordinates'][1], point['coordinates'][0], coord)
               for coord in coords)
def point_in_polygon(point, poly):
    """
    Check whether a point lies inside a Polygon/MultiPolygon geojson object.

    Keyword arguments:
    point -- point geojson object
    poly -- polygon geojson object

    Returns True when the point is inside poly, False otherwise.
    """
    # wrap a bare Polygon so the helper always receives a list of
    # polygon coordinate arrays
    if poly['type'] == 'Polygon':
        coords = [poly['coordinates']]
    else:
        coords = poly['coordinates']
    return _point_in_polygon(point, coords)
def point_in_multipolygon(point, multipoly):
    """
    Check whether a point lies inside a multipolygon
    (donut polygon is not supported).

    Keyword arguments:
    point -- point geojson object
    multipoly -- multipolygon geojson object

    Returns True when the point is inside multipoly, False otherwise.
    """
    if multipoly['type'] == "MultiPolygon":
        coords_array = [multipoly['coordinates']]
    else:
        coords_array = multipoly['coordinates']
    return any(_point_in_polygon(point, coords) for coords in coords_array)
def number2radius(number):
    """
    Convert an angle in degrees to radians.

    Keyword arguments:
    number -- angle in degrees

    Returns the angle in radians.
    """
    # step-by-step spelling of number * math.pi / 180 (same arithmetic)
    scaled = number * math.pi
    return scaled / 180
def number2degree(number):
    """
    Convert an angle in radians to degrees.

    Keyword arguments:
    number -- angle in radians

    Returns the angle in degrees.
    """
    # step-by-step spelling of number * 180 / math.pi (same arithmetic)
    scaled = number * 180
    return scaled / math.pi
def draw_circle(radius_in_meters, center_point, steps=15):
    """
    get a circle shape polygon based on centerPoint and radius

    Keyword arguments:
    radius_in_meters -- circle radius, in meters
    center_point -- point geojson object marking the circle centre
    steps -- number of polygon vertices (values below 15 are raised to 15)

    return a Polygon geojson object approximating the circle
    """
    # never draw fewer than 15 sides
    steps = steps if steps > 15 else 15
    # centre reordered to [coordinates[1], coordinates[0]]
    center = [center_point['coordinates'][1], center_point['coordinates'][0]]
    # angular distance on a sphere of radius 6371 km
    dist = (radius_in_meters / 1000) / 6371
    # convert meters to radiant
    rad_center = [number2radius(center[0]), number2radius(center[1])]
    # one vertex per step around the full 2*pi bearing range
    poly = []
    for step in range(0, steps):
        brng = 2 * math.pi * step / steps
        # spherical destination-point formulas for bearing brng at
        # angular distance dist
        lat = math.asin(math.sin(rad_center[0]) * math.cos(dist) +
                        math.cos(rad_center[0]) * math.sin(dist) * math.cos(brng))
        lng = rad_center[1] + math.atan2(math.sin(brng) * math.sin(dist)
                                         * math.cos(rad_center[0]),
                                         math.cos(dist) - math.sin(rad_center[0]) * math.sin(lat))
        poly.append([number2degree(lng), number2degree(lat)])
    return {"type": "Polygon", "coordinates": [poly]}
def rectangle_centroid(rectangle):
    """
    Compute the centroid of an axis-aligned rectangle polygon.

    Keyword arguments:
    rectangle -- polygon geojson object whose ring holds the corners
                 (vertices 0 and 2 are opposite corners)

    Returns a Point geojson object at the rectangle centre.
    """
    ring = rectangle['coordinates'][0]
    xmin, ymin = ring[0][0], ring[0][1]
    xmax, ymax = ring[2][0], ring[2][1]
    mid_x = xmin + (xmax - xmin) / 2
    mid_y = ymin + (ymax - ymin) / 2
    return {'type': 'Point', 'coordinates': [mid_x, mid_y]}
def point_distance(point1, point2):
    """
    Great-circle distance between two points (haversine on a
    6371 km sphere), like google map.

    reference http://www.movable-type.co.uk/scripts/latlong.html

    Keyword arguments:
    point1 -- point one geojson object
    point2 -- point two geojson object

    Returns the distance in meters.
    """
    lon1, lat1 = point1['coordinates'][0], point1['coordinates'][1]
    lon2, lat2 = point2['coordinates'][0], point2['coordinates'][1]
    half_lat = number2radius(lat2 - lat1) / 2
    half_lon = number2radius(lon2 - lon1) / 2
    a = (math.pow(math.sin(half_lat), 2) +
         math.cos(number2radius(lat1)) * math.cos(number2radius(lat2)) *
         math.pow(math.sin(half_lon), 2))
    c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
    return (6371 * c) * 1000
def point_distance_ellipsode(point1, point2):
    """
    calculate the distance between two points on the ellipsoid,
    with radii of curvature evaluated at point1

    Keyword arguments:
    point1 -- point one geojson object
    point2 -- point two geojson object

    return distance in meters
    """
    def _deg2rad(deg):
        # same arithmetic as the module-level number2radius helper
        return deg * math.pi / 180

    # ellipsoid parameters: semi-major axis, flattening, eccentricity
    a = 6378137
    f = 1 / 298.25722
    b = a - a * f
    e = math.sqrt((a * a - b * b) / (a * a))
    lon1 = point1['coordinates'][0]
    lat1 = point1['coordinates'][1]
    # BUG FIX: lon2 previously read point1's longitude, so the east-west
    # component of the distance was silently always zero.
    lon2 = point2['coordinates'][0]
    lat2 = point2['coordinates'][1]
    # meridian (M) and prime-vertical (N) radii of curvature at lat1
    M = a * (1 - e * e) * math.pow(1 - math.pow(e * math.sin(_deg2rad(lat1)), 2), -1.5)
    N = a / (math.pow(1 - math.pow(e * math.sin(_deg2rad(lat1)), 2), 0.5))
    distance_lat = M * _deg2rad(lat2 - lat1)
    distance_lon = N * math.cos(_deg2rad(lat1)) * (lon2 - lon1) * 3600 * math.sin(1 / 3600 * math.pi / 180)
    return math.sqrt(distance_lat * distance_lat + distance_lon * distance_lon)
def geometry_within_radius(geometry, center, radius):
    """
    Check whether a point / linestring / polygon lies entirely inside a
    radius around a center.

    Keyword arguments:
    geometry -- point/linestring/polygon geojson object
    center -- point geojson object
    radius -- radius (same unit as point_distance, meters)

    Returns True when every vertex is within radius of center,
    False otherwise (None for unsupported geometry types, matching the
    original behaviour).
    """
    geom_type = geometry['type']
    if geom_type == 'Point':
        return point_distance(geometry, center) <= radius
    if geom_type in ('LineString', 'Polygon'):
        # it's enough to check the exterior ring of the Polygon
        if geom_type == 'Polygon':
            coordinates = geometry['coordinates'][0]
        else:
            coordinates = geometry['coordinates']
        probe = {}
        for coordinate in coordinates:
            probe['coordinates'] = coordinate
            if point_distance(probe, center) > radius:
                return False
        return True
def area(poly):
    """
    Signed shoelace area of a polygon's exterior ring.

    Keyword arguments:
    poly -- polygon geojson object

    Returns the polygon area (sign depends on vertex winding order).
    """
    # TODO: polygon holes at coordinates[1]
    ring = poly['coordinates'][0]
    total = 0
    prev = len(ring) - 1
    # shoelace formula over consecutive vertex pairs; vertex index [1]
    # acts as x and [0] as y
    for cur in range(len(ring)):
        x1, y1 = ring[cur][1], ring[cur][0]
        x2, y2 = ring[prev][1], ring[prev][0]
        total += x1 * y2
        total -= y1 * x2
        prev = cur
    return total / 2
def centroid(poly):
    """
    Centroid of a polygon's exterior ring.

    adapted from http://paulbourke.net/geometry/polyarea/javascript.txt

    Keyword arguments:
    poly -- polygon geojson object

    Returns a Point geojson object at the polygon centroid.
    """
    # TODO: polygon holes at coordinates[1]
    ring = poly['coordinates'][0]
    x_total = 0
    y_total = 0
    f_total = 0
    prev = len(ring) - 1
    for cur in range(len(ring)):
        p1_x, p1_y = ring[cur][1], ring[cur][0]
        p2_x, p2_y = ring[prev][1], ring[prev][0]
        # per-edge cross product weights both coordinate sums
        f_total = p1_x * p2_y - p2_x * p1_y
        x_total += (p1_x + p2_x) * f_total
        y_total += (p1_y + p2_y) * f_total
        prev = cur
    six_area = area(poly) * 6
    return {'type': 'Point', 'coordinates': [y_total / six_area, x_total / six_area]}
def destination_point(point, brng, dist):
    """
    Calculate a destination Point from a base point, a bearing and a
    distance.

    Keyword arguments:
    point -- point geojson object
    brng -- an angle in degrees
    dist -- distance in Kilometer between destination and base point

    Returns the destination point geojson object.
    """
    dist = float(dist) / 6371  # convert dist to angular distance in radians
    brng = number2radius(brng)
    lon1 = number2radius(point['coordinates'][0])
    lat1 = number2radius(point['coordinates'][1])
    lat2 = math.asin(math.sin(lat1) * math.cos(dist) +
                     math.cos(lat1) * math.sin(dist) * math.cos(brng))
    y_term = math.sin(brng) * math.sin(dist) * math.cos(lat1)
    x_term = math.cos(dist) - math.sin(lat1) * math.sin(lat2)
    lon2 = lon1 + math.atan2(y_term, x_term)
    # normalise to -180 degree +180 degree
    lon2 = (lon2 + 3 * math.pi) % (2 * math.pi) - math.pi
    return {'type': 'Point',
            'coordinates': [number2degree(lon2), number2degree(lat2)]}
def simplify(source, kink=20):
"""
source[] array of geojson points
kink in metres, kinks above this depth kept
kink depth is the height of the triangle abc where a-b and b-c are two consecutive line segments
"""
source_coord = map(lambda o: {"lng": o.coordinates[0], "lat": o.coordinates[1]}, source)
# count, n_stack, n_dest, start, end, i, sig;
# dev_sqr, max_dev_sqr, band_sqr;
# x12, y12, d12, x13, y13, d13, x23, y23, d23;
F = (math.pi / 180.0) * 0.5
index = [] # aray of indexes of source points to include in the reduced line
sig_start = [] # indices of start & end of working section
sig_end = []
# check for simple cases
count = len(source_coord)
if count < 3:
return source_coord # one or two points
# more complex case. initialize stack
band_sqr = kink * 360.0 / (2.0 * math.pi * 6378137.0) # Now in degrees
band_sqr *= band_sqr
n_dest = 0
sig_start[0] = 0
sig_end[0] = count - 1
n_stack = 1
# while the stack is not empty
while n_stack > 0:
# ... pop the top-most entries off the stacks
start = sig_start[n_stack - 1]
end = sig_end[n_stack - 1]
n_stack -= 1
if (end - start) > 1: #any intermediate points ?
# ... yes, so find most deviant intermediate point to either side of line joining start & end points
x12 = source[end]["lng"] - source[start]["lng"]
y12 = source[end]["lat"] - source[start]["lat"]
if math.fabs(x12) > 180.0:
x12 = 360.0 - math.fabs(x12)
x12 *= math.cos(F * (source[end]["lat"] + source[start]["lat"])) # use avg lat to reduce lng
d12 = (x12 * x12) + (y12 * y12)
i = start + 1
sig = start
max_dev_sqr = -1.0
while i < end:
x13 = source[i]["lng"] - source[start]["lng"]
y13 = source[i]["lat"] - source[start]["lat"]
if math.fabs(x13) > 180.0:
x13 = 360.0 - math.fabs(x13)
x13 *= math.cos(F * (source[i]["lat"] + source[start]["lat"]))
d13 = (x13 * x13) + (y13 * y13)
x23 = source[i]["lng"] - source[end]["lng"]
y23 = source[i]["lat"] - source[end]["lat"]
if math.fabs(x23) > 180.0:
x23 = 360.0 - math.fabs(x23)
x23 *= math.cos(F * (source[i]["lat"] + source[end]["lat"]))
d23 = (x23 * x23) + (y23 * y23)
if d13 >= (d12 + d23):
dev_sqr = d23
elif d23 >= (d12 + d13):
dev_sqr = d13
else:
dev_sqr = (x13 * y12 - y13 * x12) * (x13 * y12 - y13 * x12) / d12 # solve triangle
if dev_sqr > max_dev_sqr:
sig = i
max_dev_sqr = dev_sqr
i += 1
if max_dev_sqr < band_sqr: # is there a sig. intermediate point ?
#... no, so transfer current start point
index[n_dest] = start
n_dest += 1
else: # ... yes, so push two sub-sections on stack for further processing
n_stack += 1
sig_start[n_stack - 1] = sig
sig_end[n_stack - 1] = end
n_stack += 1
sig_start[n_stack - 1] = start
sig_end[n_stack - 1] = sig
else: # ... no intermediate points, so transfer current start point
index[n_dest] = start
n_dest += 1
# transfer last point
index[n_dest] = count - 1
n_dest += 1
# make return array
r = []
for i in range(0, n_dest):
r.append(source_coord[index[i]])
return map(lambda o: {"type": "Point","coordinates": [o.lng, o.lat]}, r)
|
brandonxiang/geojson-python-utils | geojson_utils/geojson_utils.py | _point_in_bbox | python | def _point_in_bbox(point, bounds):
return not(point['coordinates'][1] < bounds[0] or point['coordinates'][1] > bounds[2]
or point['coordinates'][0] < bounds[1] or point['coordinates'][0] > bounds[3]) | valid whether the point is inside the bounding box | train | https://github.com/brandonxiang/geojson-python-utils/blob/33d0dcd5f16e0567b48c0d49fd292a4f1db16b41/geojson_utils/geojson_utils.py#L56-L61 | null | import math
def linestrings_intersect(line1, line2):
"""
To valid whether linestrings from geojson are intersected with each other.
reference: http://www.kevlindev.com/gui/math/intersection/Intersection.js
Keyword arguments:
line1 -- first line geojson object
line2 -- second line geojson object
if(line1 intersects with other) return intersect point array else empty array
"""
intersects = []
for i in range(0, len(line1['coordinates']) - 1):
for j in range(0, len(line2['coordinates']) - 1):
a1_x = line1['coordinates'][i][1]
a1_y = line1['coordinates'][i][0]
a2_x = line1['coordinates'][i + 1][1]
a2_y = line1['coordinates'][i + 1][0]
b1_x = line2['coordinates'][j][1]
b1_y = line2['coordinates'][j][0]
b2_x = line2['coordinates'][j + 1][1]
b2_y = line2['coordinates'][j + 1][0]
ua_t = (b2_x - b1_x) * (a1_y - b1_y) - \
(b2_y - b1_y) * (a1_x - b1_x)
ub_t = (a2_x - a1_x) * (a1_y - b1_y) - \
(a2_y - a1_y) * (a1_x - b1_x)
u_b = (b2_y - b1_y) * (a2_x - a1_x) - (b2_x - b1_x) * (a2_y - a1_y)
if not u_b == 0:
u_a = ua_t / u_b
u_b = ub_t / u_b
if 0 <= u_a and u_a <= 1 and 0 <= u_b and u_b <= 1:
intersects.append({'type': 'Point', 'coordinates': [
a1_x + u_a * (a2_x - a1_x), a1_y + u_a * (a2_y - a1_y)]})
# if len(intersects) == 0:
# intersects = False
return intersects
def _bbox_around_polycoords(coords):
"""
bounding box
"""
x_all = []
y_all = []
for first in coords[0]:
x_all.append(first[1])
y_all.append(first[0])
return [min(x_all), min(y_all), max(x_all), max(y_all)]
def _pnpoly(x, y, coords):
"""
the algorithm to judge whether the point is located in polygon
reference: https://www.ecse.rpi.edu/~wrf/Research/Short_Notes/pnpoly.html#Explanation
"""
vert = [[0, 0]]
for coord in coords:
for node in coord:
vert.append(node)
vert.append(coord[0])
vert.append([0, 0])
inside = False
i = 0
j = len(vert) - 1
while i < len(vert):
if ((vert[i][0] > y) != (vert[j][0] > y)) and (x < (vert[j][1] - vert[i][1])
* (y - vert[i][0]) / (vert[j][0] - vert[i][0]) + vert[i][1]):
inside = not inside
j = i
i += 1
return inside
def _point_in_polygon(point, coords):
inside_box = False
for coord in coords:
if inside_box:
break
if _point_in_bbox(point, _bbox_around_polycoords(coord)):
inside_box = True
if not inside_box:
return False
inside_poly = False
for coord in coords:
if inside_poly:
break
if _pnpoly(point['coordinates'][1], point['coordinates'][0], coord):
inside_poly = True
return inside_poly
def point_in_polygon(point, poly):
"""
valid whether the point is located in a polygon
Keyword arguments:
point -- point geojson object
poly -- polygon geojson object
if(point inside poly) return true else false
"""
coords = [poly['coordinates']] if poly[
'type'] == 'Polygon' else poly['coordinates']
return _point_in_polygon(point, coords)
def point_in_multipolygon(point, multipoly):
"""
valid whether the point is located in a mulitpolygon (donut polygon is not supported)
Keyword arguments:
point -- point geojson object
multipoly -- multipolygon geojson object
if(point inside multipoly) return true else false
"""
coords_array = [multipoly['coordinates']] if multipoly[
'type'] == "MultiPolygon" else multipoly['coordinates']
for coords in coords_array:
if _point_in_polygon(point, coords):
return True
return False
def number2radius(number):
"""
convert degree into radius
Keyword arguments:
number -- degree
return radius
"""
return number * math.pi / 180
def number2degree(number):
"""
convert radius into degree
Keyword arguments:
number -- radius
return degree
"""
return number * 180 / math.pi
def draw_circle(radius_in_meters, center_point, steps=15):
"""
get a circle shape polygon based on centerPoint and radius
Keyword arguments:
point1 -- point one geojson object
point2 -- point two geojson object
if(point inside multipoly) return true else false
"""
steps = steps if steps > 15 else 15
center = [center_point['coordinates'][1], center_point['coordinates'][0]]
dist = (radius_in_meters / 1000) / 6371
# convert meters to radiant
rad_center = [number2radius(center[0]), number2radius(center[1])]
# 15 sided circle
poly = []
for step in range(0, steps):
brng = 2 * math.pi * step / steps
lat = math.asin(math.sin(rad_center[0]) * math.cos(dist) +
math.cos(rad_center[0]) * math.sin(dist) * math.cos(brng))
lng = rad_center[1] + math.atan2(math.sin(brng) * math.sin(dist)
* math.cos(rad_center[0]), math.cos(dist) - math.sin(rad_center[0]) * math.sin(lat))
poly.append([number2degree(lng), number2degree(lat)])
return {"type": "Polygon", "coordinates": [poly]}
def rectangle_centroid(rectangle):
"""
get the centroid of the rectangle
Keyword arguments:
rectangle -- polygon geojson object
return centroid
"""
bbox = rectangle['coordinates'][0]
xmin = bbox[0][0]
ymin = bbox[0][1]
xmax = bbox[2][0]
ymax = bbox[2][1]
xwidth = xmax - xmin
ywidth = ymax - ymin
return {'type': 'Point', 'coordinates': [xmin + xwidth / 2, ymin + ywidth / 2]}
def point_distance(point1, point2):
"""
calculate the distance between two points on the sphere like google map
reference http://www.movable-type.co.uk/scripts/latlong.html
Keyword arguments:
point1 -- point one geojson object
point2 -- point two geojson object
return distance
"""
lon1 = point1['coordinates'][0]
lat1 = point1['coordinates'][1]
lon2 = point2['coordinates'][0]
lat2 = point2['coordinates'][1]
deg_lat = number2radius(lat2 - lat1)
deg_lon = number2radius(lon2 - lon1)
a = math.pow(math.sin(deg_lat / 2), 2) + math.cos(number2radius(lat1)) * \
math.cos(number2radius(lat2)) * math.pow(math.sin(deg_lon / 2), 2)
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
return (6371 * c) * 1000
def point_distance_ellipsode(point1,point2):
"""
calculate the distance between two points on the ellipsode based on point1
Keyword arguments:
point1 -- point one geojson object
point2 -- point two geojson object
return distance
"""
a = 6378137
f = 1/298.25722
b = a - a*f
e = math.sqrt((a*a-b*b)/(a*a))
lon1 = point1['coordinates'][0]
lat1 = point1['coordinates'][1]
lon2 = point1['coordinates'][0]
lat2 = point2['coordinates'][1]
M = a*(1-e*e)*math.pow(1-math.pow(e*math.sin(number2radius(lat1)),2),-1.5)
N = a/(math.pow(1-math.pow(e*math.sin(number2radius(lat1)),2),0.5))
distance_lat = M*number2radius(lat2-lat1)
distance_lon = N*math.cos(number2radius(lat1))*(lon2-lon1)*3600*math.sin(1/3600*math.pi/180)
return math.sqrt(distance_lat*distance_lat+distance_lon*distance_lon)
def geometry_within_radius(geometry, center, radius):
"""
To valid whether point or linestring or polygon is inside a radius around a center
Keyword arguments:
geometry -- point/linstring/polygon geojson object
center -- point geojson object
radius -- radius
if(geometry inside radius) return true else false
"""
if geometry['type'] == 'Point':
return point_distance(geometry, center) <= radius
elif geometry['type'] == 'LineString' or geometry['type'] == 'Polygon':
point = {}
# it's enough to check the exterior ring of the Polygon
coordinates = geometry['coordinates'][0] if geometry['type'] == 'Polygon' else geometry['coordinates']
for coordinate in coordinates:
point['coordinates'] = coordinate
if point_distance(point, center) > radius:
return False
return True
def area(poly):
"""
calculate the area of polygon
Keyword arguments:
poly -- polygon geojson object
return polygon area
"""
poly_area = 0
# TODO: polygon holes at coordinates[1]
points = poly['coordinates'][0]
j = len(points) - 1
count = len(points)
for i in range(0, count):
p1_x = points[i][1]
p1_y = points[i][0]
p2_x = points[j][1]
p2_y = points[j][0]
poly_area += p1_x * p2_y
poly_area -= p1_y * p2_x
j = i
poly_area /= 2
return poly_area
def centroid(poly):
"""
get the centroid of polygon
adapted from http://paulbourke.net/geometry/polyarea/javascript.txt
Keyword arguments:
poly -- polygon geojson object
return polygon centroid
"""
f_total = 0
x_total = 0
y_total = 0
# TODO: polygon holes at coordinates[1]
points = poly['coordinates'][0]
j = len(points) - 1
count = len(points)
for i in range(0, count):
p1_x = points[i][1]
p1_y = points[i][0]
p2_x = points[j][1]
p2_y = points[j][0]
f_total = p1_x * p2_y - p2_x * p1_y
x_total += (p1_x + p2_x) * f_total
y_total += (p1_y + p2_y) * f_total
j = i
six_area = area(poly) * 6
return {'type': 'Point', 'coordinates': [y_total / six_area, x_total / six_area]}
def destination_point(point, brng, dist):
"""
Calculate a destination Point base on a base point and a distance
Keyword arguments:
pt -- polygon geojson object
brng -- an angle in degrees
dist -- distance in Kilometer between destination and base point
return destination point object
"""
dist = float(dist) / 6371 # convert dist to angular distance in radians
brng = number2radius(brng)
lon1 = number2radius(point['coordinates'][0])
lat1 = number2radius(point['coordinates'][1])
lat2 = math.asin(math.sin(lat1) * math.cos(dist) +
math.cos(lat1) * math.sin(dist) * math.cos(brng))
lon2 = lon1 + math.atan2(math.sin(brng) * math.sin(dist) *
math.cos(lat1), math.cos(dist) - math.sin(lat1) * math.sin(lat2))
lon2 = (lon2 + 3 * math.pi) % (2 * math.pi) - math.pi # normalise to -180 degree +180 degree
return {'type': 'Point', 'coordinates': [number2degree(lon2), number2degree(lat2)]}
def simplify(source, kink=20):
"""
source[] array of geojson points
kink in metres, kinks above this depth kept
kink depth is the height of the triangle abc where a-b and b-c are two consecutive line segments
"""
source_coord = map(lambda o: {"lng": o.coordinates[0], "lat": o.coordinates[1]}, source)
# count, n_stack, n_dest, start, end, i, sig;
# dev_sqr, max_dev_sqr, band_sqr;
# x12, y12, d12, x13, y13, d13, x23, y23, d23;
F = (math.pi / 180.0) * 0.5
index = [] # aray of indexes of source points to include in the reduced line
sig_start = [] # indices of start & end of working section
sig_end = []
# check for simple cases
count = len(source_coord)
if count < 3:
return source_coord # one or two points
# more complex case. initialize stack
band_sqr = kink * 360.0 / (2.0 * math.pi * 6378137.0) # Now in degrees
band_sqr *= band_sqr
n_dest = 0
sig_start[0] = 0
sig_end[0] = count - 1
n_stack = 1
# while the stack is not empty
while n_stack > 0:
# ... pop the top-most entries off the stacks
start = sig_start[n_stack - 1]
end = sig_end[n_stack - 1]
n_stack -= 1
if (end - start) > 1: #any intermediate points ?
# ... yes, so find most deviant intermediate point to either side of line joining start & end points
x12 = source[end]["lng"] - source[start]["lng"]
y12 = source[end]["lat"] - source[start]["lat"]
if math.fabs(x12) > 180.0:
x12 = 360.0 - math.fabs(x12)
x12 *= math.cos(F * (source[end]["lat"] + source[start]["lat"])) # use avg lat to reduce lng
d12 = (x12 * x12) + (y12 * y12)
i = start + 1
sig = start
max_dev_sqr = -1.0
while i < end:
x13 = source[i]["lng"] - source[start]["lng"]
y13 = source[i]["lat"] - source[start]["lat"]
if math.fabs(x13) > 180.0:
x13 = 360.0 - math.fabs(x13)
x13 *= math.cos(F * (source[i]["lat"] + source[start]["lat"]))
d13 = (x13 * x13) + (y13 * y13)
x23 = source[i]["lng"] - source[end]["lng"]
y23 = source[i]["lat"] - source[end]["lat"]
if math.fabs(x23) > 180.0:
x23 = 360.0 - math.fabs(x23)
x23 *= math.cos(F * (source[i]["lat"] + source[end]["lat"]))
d23 = (x23 * x23) + (y23 * y23)
if d13 >= (d12 + d23):
dev_sqr = d23
elif d23 >= (d12 + d13):
dev_sqr = d13
else:
dev_sqr = (x13 * y12 - y13 * x12) * (x13 * y12 - y13 * x12) / d12 # solve triangle
if dev_sqr > max_dev_sqr:
sig = i
max_dev_sqr = dev_sqr
i += 1
if max_dev_sqr < band_sqr: # is there a sig. intermediate point ?
#... no, so transfer current start point
index[n_dest] = start
n_dest += 1
else: # ... yes, so push two sub-sections on stack for further processing
n_stack += 1
sig_start[n_stack - 1] = sig
sig_end[n_stack - 1] = end
n_stack += 1
sig_start[n_stack - 1] = start
sig_end[n_stack - 1] = sig
else: # ... no intermediate points, so transfer current start point
index[n_dest] = start
n_dest += 1
# transfer last point
index[n_dest] = count - 1
n_dest += 1
# make return array
r = []
for i in range(0, n_dest):
r.append(source_coord[index[i]])
return map(lambda o: {"type": "Point","coordinates": [o.lng, o.lat]}, r)
|
brandonxiang/geojson-python-utils | geojson_utils/geojson_utils.py | _pnpoly | python | def _pnpoly(x, y, coords):
vert = [[0, 0]]
for coord in coords:
for node in coord:
vert.append(node)
vert.append(coord[0])
vert.append([0, 0])
inside = False
i = 0
j = len(vert) - 1
while i < len(vert):
if ((vert[i][0] > y) != (vert[j][0] > y)) and (x < (vert[j][1] - vert[i][1])
* (y - vert[i][0]) / (vert[j][0] - vert[i][0]) + vert[i][1]):
inside = not inside
j = i
i += 1
return inside | the algorithm to judge whether the point is located in polygon
reference: https://www.ecse.rpi.edu/~wrf/Research/Short_Notes/pnpoly.html#Explanation | train | https://github.com/brandonxiang/geojson-python-utils/blob/33d0dcd5f16e0567b48c0d49fd292a4f1db16b41/geojson_utils/geojson_utils.py#L64-L89 | null | import math
def linestrings_intersect(line1, line2):
"""
To valid whether linestrings from geojson are intersected with each other.
reference: http://www.kevlindev.com/gui/math/intersection/Intersection.js
Keyword arguments:
line1 -- first line geojson object
line2 -- second line geojson object
if(line1 intersects with other) return intersect point array else empty array
"""
intersects = []
for i in range(0, len(line1['coordinates']) - 1):
for j in range(0, len(line2['coordinates']) - 1):
a1_x = line1['coordinates'][i][1]
a1_y = line1['coordinates'][i][0]
a2_x = line1['coordinates'][i + 1][1]
a2_y = line1['coordinates'][i + 1][0]
b1_x = line2['coordinates'][j][1]
b1_y = line2['coordinates'][j][0]
b2_x = line2['coordinates'][j + 1][1]
b2_y = line2['coordinates'][j + 1][0]
ua_t = (b2_x - b1_x) * (a1_y - b1_y) - \
(b2_y - b1_y) * (a1_x - b1_x)
ub_t = (a2_x - a1_x) * (a1_y - b1_y) - \
(a2_y - a1_y) * (a1_x - b1_x)
u_b = (b2_y - b1_y) * (a2_x - a1_x) - (b2_x - b1_x) * (a2_y - a1_y)
if not u_b == 0:
u_a = ua_t / u_b
u_b = ub_t / u_b
if 0 <= u_a and u_a <= 1 and 0 <= u_b and u_b <= 1:
intersects.append({'type': 'Point', 'coordinates': [
a1_x + u_a * (a2_x - a1_x), a1_y + u_a * (a2_y - a1_y)]})
# if len(intersects) == 0:
# intersects = False
return intersects
def _bbox_around_polycoords(coords):
"""
bounding box
"""
x_all = []
y_all = []
for first in coords[0]:
x_all.append(first[1])
y_all.append(first[0])
return [min(x_all), min(y_all), max(x_all), max(y_all)]
def _point_in_bbox(point, bounds):
"""
valid whether the point is inside the bounding box
"""
return not(point['coordinates'][1] < bounds[0] or point['coordinates'][1] > bounds[2]
or point['coordinates'][0] < bounds[1] or point['coordinates'][0] > bounds[3])
def _point_in_polygon(point, coords):
inside_box = False
for coord in coords:
if inside_box:
break
if _point_in_bbox(point, _bbox_around_polycoords(coord)):
inside_box = True
if not inside_box:
return False
inside_poly = False
for coord in coords:
if inside_poly:
break
if _pnpoly(point['coordinates'][1], point['coordinates'][0], coord):
inside_poly = True
return inside_poly
def point_in_polygon(point, poly):
"""
valid whether the point is located in a polygon
Keyword arguments:
point -- point geojson object
poly -- polygon geojson object
if(point inside poly) return true else false
"""
coords = [poly['coordinates']] if poly[
'type'] == 'Polygon' else poly['coordinates']
return _point_in_polygon(point, coords)
def point_in_multipolygon(point, multipoly):
"""
valid whether the point is located in a mulitpolygon (donut polygon is not supported)
Keyword arguments:
point -- point geojson object
multipoly -- multipolygon geojson object
if(point inside multipoly) return true else false
"""
coords_array = [multipoly['coordinates']] if multipoly[
'type'] == "MultiPolygon" else multipoly['coordinates']
for coords in coords_array:
if _point_in_polygon(point, coords):
return True
return False
def number2radius(number):
"""
convert degree into radius
Keyword arguments:
number -- degree
return radius
"""
return number * math.pi / 180
def number2degree(number):
"""
convert radius into degree
Keyword arguments:
number -- radius
return degree
"""
return number * 180 / math.pi
def draw_circle(radius_in_meters, center_point, steps=15):
"""
get a circle shape polygon based on centerPoint and radius
Keyword arguments:
point1 -- point one geojson object
point2 -- point two geojson object
if(point inside multipoly) return true else false
"""
steps = steps if steps > 15 else 15
center = [center_point['coordinates'][1], center_point['coordinates'][0]]
dist = (radius_in_meters / 1000) / 6371
# convert meters to radiant
rad_center = [number2radius(center[0]), number2radius(center[1])]
# 15 sided circle
poly = []
for step in range(0, steps):
brng = 2 * math.pi * step / steps
lat = math.asin(math.sin(rad_center[0]) * math.cos(dist) +
math.cos(rad_center[0]) * math.sin(dist) * math.cos(brng))
lng = rad_center[1] + math.atan2(math.sin(brng) * math.sin(dist)
* math.cos(rad_center[0]), math.cos(dist) - math.sin(rad_center[0]) * math.sin(lat))
poly.append([number2degree(lng), number2degree(lat)])
return {"type": "Polygon", "coordinates": [poly]}
def rectangle_centroid(rectangle):
"""
get the centroid of the rectangle
Keyword arguments:
rectangle -- polygon geojson object
return centroid
"""
bbox = rectangle['coordinates'][0]
xmin = bbox[0][0]
ymin = bbox[0][1]
xmax = bbox[2][0]
ymax = bbox[2][1]
xwidth = xmax - xmin
ywidth = ymax - ymin
return {'type': 'Point', 'coordinates': [xmin + xwidth / 2, ymin + ywidth / 2]}
def point_distance(point1, point2):
"""
calculate the distance between two points on the sphere like google map
reference http://www.movable-type.co.uk/scripts/latlong.html
Keyword arguments:
point1 -- point one geojson object
point2 -- point two geojson object
return distance
"""
lon1 = point1['coordinates'][0]
lat1 = point1['coordinates'][1]
lon2 = point2['coordinates'][0]
lat2 = point2['coordinates'][1]
deg_lat = number2radius(lat2 - lat1)
deg_lon = number2radius(lon2 - lon1)
a = math.pow(math.sin(deg_lat / 2), 2) + math.cos(number2radius(lat1)) * \
math.cos(number2radius(lat2)) * math.pow(math.sin(deg_lon / 2), 2)
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
return (6371 * c) * 1000
def point_distance_ellipsode(point1,point2):
"""
calculate the distance between two points on the ellipsode based on point1
Keyword arguments:
point1 -- point one geojson object
point2 -- point two geojson object
return distance
"""
a = 6378137
f = 1/298.25722
b = a - a*f
e = math.sqrt((a*a-b*b)/(a*a))
lon1 = point1['coordinates'][0]
lat1 = point1['coordinates'][1]
lon2 = point1['coordinates'][0]
lat2 = point2['coordinates'][1]
M = a*(1-e*e)*math.pow(1-math.pow(e*math.sin(number2radius(lat1)),2),-1.5)
N = a/(math.pow(1-math.pow(e*math.sin(number2radius(lat1)),2),0.5))
distance_lat = M*number2radius(lat2-lat1)
distance_lon = N*math.cos(number2radius(lat1))*(lon2-lon1)*3600*math.sin(1/3600*math.pi/180)
return math.sqrt(distance_lat*distance_lat+distance_lon*distance_lon)
def geometry_within_radius(geometry, center, radius):
"""
To valid whether point or linestring or polygon is inside a radius around a center
Keyword arguments:
geometry -- point/linstring/polygon geojson object
center -- point geojson object
radius -- radius
if(geometry inside radius) return true else false
"""
if geometry['type'] == 'Point':
return point_distance(geometry, center) <= radius
elif geometry['type'] == 'LineString' or geometry['type'] == 'Polygon':
point = {}
# it's enough to check the exterior ring of the Polygon
coordinates = geometry['coordinates'][0] if geometry['type'] == 'Polygon' else geometry['coordinates']
for coordinate in coordinates:
point['coordinates'] = coordinate
if point_distance(point, center) > radius:
return False
return True
def area(poly):
"""
calculate the area of polygon
Keyword arguments:
poly -- polygon geojson object
return polygon area
"""
poly_area = 0
# TODO: polygon holes at coordinates[1]
points = poly['coordinates'][0]
j = len(points) - 1
count = len(points)
for i in range(0, count):
p1_x = points[i][1]
p1_y = points[i][0]
p2_x = points[j][1]
p2_y = points[j][0]
poly_area += p1_x * p2_y
poly_area -= p1_y * p2_x
j = i
poly_area /= 2
return poly_area
def centroid(poly):
"""
get the centroid of polygon
adapted from http://paulbourke.net/geometry/polyarea/javascript.txt
Keyword arguments:
poly -- polygon geojson object
return polygon centroid
"""
f_total = 0
x_total = 0
y_total = 0
# TODO: polygon holes at coordinates[1]
points = poly['coordinates'][0]
j = len(points) - 1
count = len(points)
for i in range(0, count):
p1_x = points[i][1]
p1_y = points[i][0]
p2_x = points[j][1]
p2_y = points[j][0]
f_total = p1_x * p2_y - p2_x * p1_y
x_total += (p1_x + p2_x) * f_total
y_total += (p1_y + p2_y) * f_total
j = i
six_area = area(poly) * 6
return {'type': 'Point', 'coordinates': [y_total / six_area, x_total / six_area]}
def destination_point(point, brng, dist):
"""
Calculate a destination Point base on a base point and a distance
Keyword arguments:
pt -- polygon geojson object
brng -- an angle in degrees
dist -- distance in Kilometer between destination and base point
return destination point object
"""
dist = float(dist) / 6371 # convert dist to angular distance in radians
brng = number2radius(brng)
lon1 = number2radius(point['coordinates'][0])
lat1 = number2radius(point['coordinates'][1])
lat2 = math.asin(math.sin(lat1) * math.cos(dist) +
math.cos(lat1) * math.sin(dist) * math.cos(brng))
lon2 = lon1 + math.atan2(math.sin(brng) * math.sin(dist) *
math.cos(lat1), math.cos(dist) - math.sin(lat1) * math.sin(lat2))
lon2 = (lon2 + 3 * math.pi) % (2 * math.pi) - math.pi # normalise to -180 degree +180 degree
return {'type': 'Point', 'coordinates': [number2degree(lon2), number2degree(lat2)]}
def simplify(source, kink=20):
"""
source[] array of geojson points
kink in metres, kinks above this depth kept
kink depth is the height of the triangle abc where a-b and b-c are two consecutive line segments
"""
source_coord = map(lambda o: {"lng": o.coordinates[0], "lat": o.coordinates[1]}, source)
# count, n_stack, n_dest, start, end, i, sig;
# dev_sqr, max_dev_sqr, band_sqr;
# x12, y12, d12, x13, y13, d13, x23, y23, d23;
F = (math.pi / 180.0) * 0.5
index = [] # aray of indexes of source points to include in the reduced line
sig_start = [] # indices of start & end of working section
sig_end = []
# check for simple cases
count = len(source_coord)
if count < 3:
return source_coord # one or two points
# more complex case. initialize stack
band_sqr = kink * 360.0 / (2.0 * math.pi * 6378137.0) # Now in degrees
band_sqr *= band_sqr
n_dest = 0
sig_start[0] = 0
sig_end[0] = count - 1
n_stack = 1
# while the stack is not empty
while n_stack > 0:
# ... pop the top-most entries off the stacks
start = sig_start[n_stack - 1]
end = sig_end[n_stack - 1]
n_stack -= 1
if (end - start) > 1: #any intermediate points ?
# ... yes, so find most deviant intermediate point to either side of line joining start & end points
x12 = source[end]["lng"] - source[start]["lng"]
y12 = source[end]["lat"] - source[start]["lat"]
if math.fabs(x12) > 180.0:
x12 = 360.0 - math.fabs(x12)
x12 *= math.cos(F * (source[end]["lat"] + source[start]["lat"])) # use avg lat to reduce lng
d12 = (x12 * x12) + (y12 * y12)
i = start + 1
sig = start
max_dev_sqr = -1.0
while i < end:
x13 = source[i]["lng"] - source[start]["lng"]
y13 = source[i]["lat"] - source[start]["lat"]
if math.fabs(x13) > 180.0:
x13 = 360.0 - math.fabs(x13)
x13 *= math.cos(F * (source[i]["lat"] + source[start]["lat"]))
d13 = (x13 * x13) + (y13 * y13)
x23 = source[i]["lng"] - source[end]["lng"]
y23 = source[i]["lat"] - source[end]["lat"]
if math.fabs(x23) > 180.0:
x23 = 360.0 - math.fabs(x23)
x23 *= math.cos(F * (source[i]["lat"] + source[end]["lat"]))
d23 = (x23 * x23) + (y23 * y23)
if d13 >= (d12 + d23):
dev_sqr = d23
elif d23 >= (d12 + d13):
dev_sqr = d13
else:
dev_sqr = (x13 * y12 - y13 * x12) * (x13 * y12 - y13 * x12) / d12 # solve triangle
if dev_sqr > max_dev_sqr:
sig = i
max_dev_sqr = dev_sqr
i += 1
if max_dev_sqr < band_sqr: # is there a sig. intermediate point ?
#... no, so transfer current start point
index[n_dest] = start
n_dest += 1
else: # ... yes, so push two sub-sections on stack for further processing
n_stack += 1
sig_start[n_stack - 1] = sig
sig_end[n_stack - 1] = end
n_stack += 1
sig_start[n_stack - 1] = start
sig_end[n_stack - 1] = sig
else: # ... no intermediate points, so transfer current start point
index[n_dest] = start
n_dest += 1
# transfer last point
index[n_dest] = count - 1
n_dest += 1
# make return array
r = []
for i in range(0, n_dest):
r.append(source_coord[index[i]])
return map(lambda o: {"type": "Point","coordinates": [o.lng, o.lat]}, r)
|
brandonxiang/geojson-python-utils | geojson_utils/geojson_utils.py | point_in_polygon | python | def point_in_polygon(point, poly):
coords = [poly['coordinates']] if poly[
'type'] == 'Polygon' else poly['coordinates']
return _point_in_polygon(point, coords) | valid whether the point is located in a polygon
Keyword arguments:
point -- point geojson object
poly -- polygon geojson object
if(point inside poly) return true else false | train | https://github.com/brandonxiang/geojson-python-utils/blob/33d0dcd5f16e0567b48c0d49fd292a4f1db16b41/geojson_utils/geojson_utils.py#L111-L123 | [
"def _point_in_polygon(point, coords):\n inside_box = False\n for coord in coords:\n if inside_box:\n break\n if _point_in_bbox(point, _bbox_around_polycoords(coord)):\n inside_box = True\n if not inside_box:\n return False\n\n inside_poly = False\n for coor... | import math
def linestrings_intersect(line1, line2):
"""
To valid whether linestrings from geojson are intersected with each other.
reference: http://www.kevlindev.com/gui/math/intersection/Intersection.js
Keyword arguments:
line1 -- first line geojson object
line2 -- second line geojson object
if(line1 intersects with other) return intersect point array else empty array
"""
intersects = []
for i in range(0, len(line1['coordinates']) - 1):
for j in range(0, len(line2['coordinates']) - 1):
a1_x = line1['coordinates'][i][1]
a1_y = line1['coordinates'][i][0]
a2_x = line1['coordinates'][i + 1][1]
a2_y = line1['coordinates'][i + 1][0]
b1_x = line2['coordinates'][j][1]
b1_y = line2['coordinates'][j][0]
b2_x = line2['coordinates'][j + 1][1]
b2_y = line2['coordinates'][j + 1][0]
ua_t = (b2_x - b1_x) * (a1_y - b1_y) - \
(b2_y - b1_y) * (a1_x - b1_x)
ub_t = (a2_x - a1_x) * (a1_y - b1_y) - \
(a2_y - a1_y) * (a1_x - b1_x)
u_b = (b2_y - b1_y) * (a2_x - a1_x) - (b2_x - b1_x) * (a2_y - a1_y)
if not u_b == 0:
u_a = ua_t / u_b
u_b = ub_t / u_b
if 0 <= u_a and u_a <= 1 and 0 <= u_b and u_b <= 1:
intersects.append({'type': 'Point', 'coordinates': [
a1_x + u_a * (a2_x - a1_x), a1_y + u_a * (a2_y - a1_y)]})
# if len(intersects) == 0:
# intersects = False
return intersects
def _bbox_around_polycoords(coords):
"""
bounding box
"""
x_all = []
y_all = []
for first in coords[0]:
x_all.append(first[1])
y_all.append(first[0])
return [min(x_all), min(y_all), max(x_all), max(y_all)]
def _point_in_bbox(point, bounds):
"""
valid whether the point is inside the bounding box
"""
return not(point['coordinates'][1] < bounds[0] or point['coordinates'][1] > bounds[2]
or point['coordinates'][0] < bounds[1] or point['coordinates'][0] > bounds[3])
def _pnpoly(x, y, coords):
"""
the algorithm to judge whether the point is located in polygon
reference: https://www.ecse.rpi.edu/~wrf/Research/Short_Notes/pnpoly.html#Explanation
"""
vert = [[0, 0]]
for coord in coords:
for node in coord:
vert.append(node)
vert.append(coord[0])
vert.append([0, 0])
inside = False
i = 0
j = len(vert) - 1
while i < len(vert):
if ((vert[i][0] > y) != (vert[j][0] > y)) and (x < (vert[j][1] - vert[i][1])
* (y - vert[i][0]) / (vert[j][0] - vert[i][0]) + vert[i][1]):
inside = not inside
j = i
i += 1
return inside
def _point_in_polygon(point, coords):
inside_box = False
for coord in coords:
if inside_box:
break
if _point_in_bbox(point, _bbox_around_polycoords(coord)):
inside_box = True
if not inside_box:
return False
inside_poly = False
for coord in coords:
if inside_poly:
break
if _pnpoly(point['coordinates'][1], point['coordinates'][0], coord):
inside_poly = True
return inside_poly
def point_in_multipolygon(point, multipoly):
"""
valid whether the point is located in a mulitpolygon (donut polygon is not supported)
Keyword arguments:
point -- point geojson object
multipoly -- multipolygon geojson object
if(point inside multipoly) return true else false
"""
coords_array = [multipoly['coordinates']] if multipoly[
'type'] == "MultiPolygon" else multipoly['coordinates']
for coords in coords_array:
if _point_in_polygon(point, coords):
return True
return False
def number2radius(number):
    """
    Convert an angle from degrees to radians.

    Keyword arguments:
    number -- angle in degrees

    return the angle in radians
    """
    return (number * math.pi) / 180.0
def number2degree(number):
    """
    Convert an angle from radians to degrees.

    Keyword arguments:
    number -- angle in radians

    return the angle in degrees
    """
    return (number * 180.0) / math.pi
def draw_circle(radius_in_meters, center_point, steps=15):
    """
    Build a circle-shaped Polygon approximated by *steps* straight segments.

    Keyword arguments:
    radius_in_meters -- circle radius in meters
    center_point -- point geojson object marking the circle center
    steps -- number of polygon vertices (values below 15 are raised to 15)

    return a polygon geojson object approximating the circle
    """
    if steps <= 15:
        steps = 15
    # angular distance covered by the radius, in radians
    dist = (radius_in_meters / 1000) / 6371
    rad_lat = number2radius(center_point['coordinates'][1])
    rad_lng = number2radius(center_point['coordinates'][0])
    ring = []
    for step in range(0, steps):
        # bearing of this vertex as seen from the center
        brng = 2 * math.pi * step / steps
        lat = math.asin(math.sin(rad_lat) * math.cos(dist) +
                        math.cos(rad_lat) * math.sin(dist) * math.cos(brng))
        lng = rad_lng + math.atan2(math.sin(brng) * math.sin(dist)
                                   * math.cos(rad_lat), math.cos(dist) - math.sin(rad_lat) * math.sin(lat))
        ring.append([number2degree(lng), number2degree(lat)])
    return {"type": "Polygon", "coordinates": [ring]}
def rectangle_centroid(rectangle):
    """
    get the centroid of an axis-aligned rectangle

    Keyword arguments:
    rectangle -- polygon geojson object whose ring lists the corners in order

    return centroid as a point geojson object
    """
    ring = rectangle['coordinates'][0]
    # opposite corners of the rectangle
    x_min, y_min = ring[0][0], ring[0][1]
    x_max, y_max = ring[2][0], ring[2][1]
    return {'type': 'Point',
            'coordinates': [x_min + (x_max - x_min) / 2, y_min + (y_max - y_min) / 2]}
def point_distance(point1, point2):
    """
    calculate the great-circle (haversine) distance between two points,
    like google map
    reference http://www.movable-type.co.uk/scripts/latlong.html

    Keyword arguments:
    point1 -- point one geojson object
    point2 -- point two geojson object

    return distance in meters
    """
    lng_a = point1['coordinates'][0]
    lat_a = point1['coordinates'][1]
    lng_b = point2['coordinates'][0]
    lat_b = point2['coordinates'][1]
    delta_lat = number2radius(lat_b - lat_a)
    delta_lng = number2radius(lng_b - lng_a)
    # haversine term
    half_chord = math.pow(math.sin(delta_lat / 2), 2) + math.cos(number2radius(lat_a)) * \
        math.cos(number2radius(lat_b)) * math.pow(math.sin(delta_lng / 2), 2)
    # central angle in radians
    angle = 2 * math.atan2(math.sqrt(half_chord), math.sqrt(1 - half_chord))
    # mean Earth radius 6371 km, converted to meters
    return (6371 * angle) * 1000
def point_distance_ellipsode(point1, point2):
    """
    calculate the distance between two points on the ellipsoid, linearized
    around point1 (first-order approximation; accurate for small separations)

    Keyword arguments:
    point1 -- point one geojson object
    point2 -- point two geojson object

    return distance in meters
    """
    # WGS-84-like ellipsoid parameters
    a = 6378137                                # semi-major axis (m)
    f = 1 / 298.25722                          # flattening
    b = a - a * f                              # semi-minor axis
    e = math.sqrt((a * a - b * b) / (a * a))   # first eccentricity
    lon1 = point1['coordinates'][0]
    lat1 = point1['coordinates'][1]
    # BUG FIX: lon2 previously read point1's longitude, so any longitude
    # difference was silently ignored (east-west distance was always 0).
    lon2 = point2['coordinates'][0]
    lat2 = point2['coordinates'][1]
    # meridian (M) and prime-vertical (N) radii of curvature at lat1
    M = a * (1 - e * e) * math.pow(1 - math.pow(e * math.sin(math.radians(lat1)), 2), -1.5)
    N = a / (math.pow(1 - math.pow(e * math.sin(math.radians(lat1)), 2), 0.5))
    distance_lat = M * math.radians(lat2 - lat1)
    distance_lon = N * math.cos(math.radians(lat1)) * (lon2 - lon1) * 3600 * math.sin(1 / 3600 * math.pi / 180)
    return math.sqrt(distance_lat * distance_lat + distance_lon * distance_lon)
def geometry_within_radius(geometry, center, radius):
    """
    Check that a Point / LineString / Polygon lies entirely inside the
    circle of the given radius around *center*.

    Keyword arguments:
    geometry -- point/linestring/polygon geojson object
    center -- point geojson object
    radius -- radius in meters

    if(geometry inside radius) return true else false
    """
    geom_type = geometry['type']
    if geom_type == 'Point':
        return point_distance(geometry, center) <= radius
    elif geom_type == 'LineString' or geom_type == 'Polygon':
        # for a Polygon it is enough to check the exterior ring
        if geom_type == 'Polygon':
            coordinates = geometry['coordinates'][0]
        else:
            coordinates = geometry['coordinates']
        for coordinate in coordinates:
            if point_distance({'coordinates': coordinate}, center) > radius:
                return False
        return True
def area(poly):
    """
    calculate the signed area of a polygon (shoelace formula)

    Keyword arguments:
    poly -- polygon geojson object

    return polygon area (sign depends on ring orientation)
    """
    # TODO: polygon holes at coordinates[1]
    ring = poly['coordinates'][0]
    total = 0
    prev = len(ring) - 1
    for cur in range(0, len(ring)):
        # cross terms of consecutive vertices ([lng, lat] pairs)
        total += ring[cur][1] * ring[prev][0]
        total -= ring[cur][0] * ring[prev][1]
        prev = cur
    return total / 2
def centroid(poly):
    """
    get the centroid of polygon
    adapted from http://paulbourke.net/geometry/polyarea/javascript.txt

    Keyword arguments:
    poly -- polygon geojson object

    return polygon centroid as a point geojson object
    """
    # TODO: polygon holes at coordinates[1]
    ring = poly['coordinates'][0]
    lat_sum = 0
    lng_sum = 0
    prev = len(ring) - 1
    for cur in range(0, len(ring)):
        lat1, lng1 = ring[cur][1], ring[cur][0]
        lat2, lng2 = ring[prev][1], ring[prev][0]
        # per-edge cross product, weights the vertex midpoints below
        cross = lat1 * lng2 - lat2 * lng1
        lat_sum += (lat1 + lat2) * cross
        lng_sum += (lng1 + lng2) * cross
        prev = cur
    six_area = area(poly) * 6
    return {'type': 'Point', 'coordinates': [lng_sum / six_area, lat_sum / six_area]}
def destination_point(point, brng, dist):
    """
    Calculate the destination point reached by travelling *dist* kilometers
    from *point* along bearing *brng*.

    Keyword arguments:
    point -- point geojson object (start position)
    brng -- bearing in degrees
    dist -- distance in Kilometer between destination and base point

    return destination point object
    """
    angular_dist = float(dist) / 6371  # angular distance in radians
    bearing = number2radius(brng)
    lon1 = number2radius(point['coordinates'][0])
    lat1 = number2radius(point['coordinates'][1])
    lat2 = math.asin(math.sin(lat1) * math.cos(angular_dist) +
                     math.cos(lat1) * math.sin(angular_dist) * math.cos(bearing))
    lon2 = lon1 + math.atan2(math.sin(bearing) * math.sin(angular_dist) *
                             math.cos(lat1), math.cos(angular_dist) - math.sin(lat1) * math.sin(lat2))
    # normalise to the -180..+180 degree range
    lon2 = (lon2 + 3 * math.pi) % (2 * math.pi) - math.pi
    return {'type': 'Point', 'coordinates': [number2degree(lon2), number2degree(lat2)]}
def simplify(source, kink=20):
    """
    Simplify a polyline given as geojson Point objects, keeping only kinks
    deeper than the tolerance (Douglas-Peucker style, stack based).

    Keyword arguments:
    source -- list of point geojson objects
    kink -- tolerance in meters; kinks above this depth are kept (kink depth
            is the height of the triangle abc where a-b and b-c are two
            consecutive line segments)

    return a list of point geojson objects describing the simplified line

    Fixes over the previous revision (which crashed under Python 3):
    * ``map`` results were indexed / len()'d       -> build real lists
    * ``o.coordinates`` attribute access on dicts  -> dict subscripting
    * writes to fixed indices of empty lists       -> append / explicit stack
    * the deviation loop read ``source`` instead of the lng/lat projection
    """
    # project to {lng, lat} records once
    source_coord = [{"lng": pt["coordinates"][0], "lat": pt["coordinates"][1]}
                    for pt in source]
    count = len(source_coord)
    if count < 3:
        # one or two points: nothing to simplify
        return [{"type": "Point", "coordinates": [p["lng"], p["lat"]]}
                for p in source_coord]
    F = (math.pi / 180.0) * 0.5
    # convert the metric tolerance into squared degrees
    band_sqr = kink * 360.0 / (2.0 * math.pi * 6378137.0)
    band_sqr *= band_sqr
    index = []                  # indexes of source points kept in the result
    stack = [(0, count - 1)]    # working sections as (start, end) pairs
    while stack:
        start, end = stack.pop()
        if (end - start) > 1:  # any intermediate points?
            # find the most deviant intermediate point relative to the
            # chord joining the section's start and end points
            x12 = source_coord[end]["lng"] - source_coord[start]["lng"]
            y12 = source_coord[end]["lat"] - source_coord[start]["lat"]
            if math.fabs(x12) > 180.0:
                x12 = 360.0 - math.fabs(x12)
            # use average latitude to shrink longitude differences
            x12 *= math.cos(F * (source_coord[end]["lat"] + source_coord[start]["lat"]))
            d12 = (x12 * x12) + (y12 * y12)
            sig = start
            max_dev_sqr = -1.0
            for i in range(start + 1, end):
                x13 = source_coord[i]["lng"] - source_coord[start]["lng"]
                y13 = source_coord[i]["lat"] - source_coord[start]["lat"]
                if math.fabs(x13) > 180.0:
                    x13 = 360.0 - math.fabs(x13)
                x13 *= math.cos(F * (source_coord[i]["lat"] + source_coord[start]["lat"]))
                d13 = (x13 * x13) + (y13 * y13)
                x23 = source_coord[i]["lng"] - source_coord[end]["lng"]
                y23 = source_coord[i]["lat"] - source_coord[end]["lat"]
                if math.fabs(x23) > 180.0:
                    x23 = 360.0 - math.fabs(x23)
                x23 *= math.cos(F * (source_coord[i]["lat"] + source_coord[end]["lat"]))
                d23 = (x23 * x23) + (y23 * y23)
                if d13 >= (d12 + d23):
                    dev_sqr = d23
                elif d23 >= (d12 + d13):
                    dev_sqr = d13
                else:
                    # squared perpendicular distance to the chord
                    dev_sqr = (x13 * y12 - y13 * x12) * (x13 * y12 - y13 * x12) / d12
                if dev_sqr > max_dev_sqr:
                    sig = i
                    max_dev_sqr = dev_sqr
            if max_dev_sqr < band_sqr:
                # no significant intermediate point: keep only the start
                index.append(start)
            else:
                # split at the most deviant point; left half processed first
                stack.append((sig, end))
                stack.append((start, sig))
        else:
            # no intermediate points, so keep the start point
            index.append(start)
    index.append(count - 1)  # always keep the last point
    return [{"type": "Point",
             "coordinates": [source_coord[i]["lng"], source_coord[i]["lat"]]}
            for i in index]
|
brandonxiang/geojson-python-utils | geojson_utils/geojson_utils.py | point_in_multipolygon | python | def point_in_multipolygon(point, multipoly):
coords_array = [multipoly['coordinates']] if multipoly[
'type'] == "MultiPolygon" else multipoly['coordinates']
for coords in coords_array:
if _point_in_polygon(point, coords):
return True
return False | valid whether the point is located in a mulitpolygon (donut polygon is not supported)
Keyword arguments:
point -- point geojson object
multipoly -- multipolygon geojson object
if(point inside multipoly) return true else false | train | https://github.com/brandonxiang/geojson-python-utils/blob/33d0dcd5f16e0567b48c0d49fd292a4f1db16b41/geojson_utils/geojson_utils.py#L126-L143 | [
"def _point_in_polygon(point, coords):\n inside_box = False\n for coord in coords:\n if inside_box:\n break\n if _point_in_bbox(point, _bbox_around_polycoords(coord)):\n inside_box = True\n if not inside_box:\n return False\n\n inside_poly = False\n for coor... | import math
def linestrings_intersect(line1, line2):
    """
    To valid whether linestrings from geojson are intersected with each other.
    reference: http://www.kevlindev.com/gui/math/intersection/Intersection.js

    Keyword arguments:
    line1 -- first line geojson object
    line2 -- second line geojson object

    if(line1 intersects with other) return intersect point array else empty array
    """
    intersects = []
    # Test every segment of line1 against every segment of line2.
    for i in range(0, len(line1['coordinates']) - 1):
        for j in range(0, len(line2['coordinates']) - 1):
            # Segment endpoints.  Index [1] feeds the *_x names and [0] the
            # *_y names, so with GeoJSON's [lng, lat] ordering x is latitude
            # and y is longitude here.
            a1_x = line1['coordinates'][i][1]
            a1_y = line1['coordinates'][i][0]
            a2_x = line1['coordinates'][i + 1][1]
            a2_y = line1['coordinates'][i + 1][0]
            b1_x = line2['coordinates'][j][1]
            b1_y = line2['coordinates'][j][0]
            b2_x = line2['coordinates'][j + 1][1]
            b2_y = line2['coordinates'][j + 1][0]
            # Determinants of the parametric segment-intersection system.
            ua_t = (b2_x - b1_x) * (a1_y - b1_y) - \
                (b2_y - b1_y) * (a1_x - b1_x)
            ub_t = (a2_x - a1_x) * (a1_y - b1_y) - \
                (a2_y - a1_y) * (a1_x - b1_x)
            u_b = (b2_y - b1_y) * (a2_x - a1_x) - (b2_x - b1_x) * (a2_y - a1_y)
            # u_b == 0 means the segments are parallel (or collinear); those
            # pairs contribute no intersection point.
            if not u_b == 0:
                u_a = ua_t / u_b
                u_b = ub_t / u_b
                # intersection lies within both segments when both
                # parameters fall in [0, 1]
                if 0 <= u_a and u_a <= 1 and 0 <= u_b and u_b <= 1:
                    # NOTE(review): the emitted coordinates are
                    # [x-component, y-component], i.e. reversed relative to
                    # the [lng, lat] input ordering -- confirm intended.
                    intersects.append({'type': 'Point', 'coordinates': [
                        a1_x + u_a * (a2_x - a1_x), a1_y + u_a * (a2_y - a1_y)]})
    # if len(intersects) == 0:
    #     intersects = False
    return intersects
def _bbox_around_polycoords(coords):
"""
bounding box
"""
x_all = []
y_all = []
for first in coords[0]:
x_all.append(first[1])
y_all.append(first[0])
return [min(x_all), min(y_all), max(x_all), max(y_all)]
def _point_in_bbox(point, bounds):
"""
valid whether the point is inside the bounding box
"""
return not(point['coordinates'][1] < bounds[0] or point['coordinates'][1] > bounds[2]
or point['coordinates'][0] < bounds[1] or point['coordinates'][0] > bounds[3])
def _pnpoly(x, y, coords):
    """
    the algorithm to judge whether the point is located in polygon
    reference: https://www.ecse.rpi.edu/~wrf/Research/Short_Notes/pnpoly.html#Explanation

    Keyword arguments:
    x -- query-point component compared against each vertex's [1] entry
         (callers pass point['coordinates'][1], i.e. latitude)
    y -- query-point component compared against each vertex's [0] entry
         (callers pass point['coordinates'][0], i.e. longitude)
    coords -- list of rings, each ring a list of coordinate pairs

    return True when the point is inside (even-odd rule), else False
    """
    # Flatten every ring into a single vertex list.  Each ring is closed by
    # re-appending its first vertex, and [0, 0] sentinel vertices are placed
    # before, between, and after the rings.
    # NOTE(review): the sentinel edges through [0, 0] take part in the
    # crossing test below, so points near the origin (or rings straddling
    # it) may be misclassified -- confirm against the pnpoly reference.
    vert = [[0, 0]]
    for coord in coords:
        for node in coord:
            vert.append(node)
        vert.append(coord[0])
        vert.append([0, 0])
    # Classic ray-casting / even-odd test: walk edges (vert[j] -> vert[i])
    # and toggle `inside` each time a ray from the query point crosses one.
    inside = False
    i = 0
    j = len(vert) - 1
    while i < len(vert):
        if ((vert[i][0] > y) != (vert[j][0] > y)) and (x < (vert[j][1] - vert[i][1])
                * (y - vert[i][0]) / (vert[j][0] - vert[i][0]) + vert[i][1]):
            inside = not inside
        j = i
        i += 1
    return inside
def _point_in_polygon(point, coords):
    """
    Check that *point* falls inside one of the polygons in *coords*.

    Keyword arguments:
    point -- point geojson object
    coords -- list of polygon coordinate arrays (each a list of rings)

    return True when any polygon contains the point, else False
    """
    # Cheap bounding-box rejection first: give up early when no polygon's
    # bbox contains the point at all.
    if not any(_point_in_bbox(point, _bbox_around_polycoords(polygon))
               for polygon in coords):
        return False
    # Exact test: the point is inside when any polygon's ray-casting
    # check succeeds.
    return any(_pnpoly(point['coordinates'][1], point['coordinates'][0], polygon)
               for polygon in coords)
def point_in_polygon(point, poly):
    """
    valid whether the point is located in a polygon

    Keyword arguments:
    point -- point geojson object
    poly -- polygon geojson object

    if(point inside poly) return true else false
    """
    # a bare Polygon is wrapped so the helper always receives a list of
    # polygon coordinate arrays
    if poly['type'] == 'Polygon':
        polygons = [poly['coordinates']]
    else:
        polygons = poly['coordinates']
    return _point_in_polygon(point, polygons)
def number2radius(number):
    """
    Convert an angle from degrees to radians.

    Keyword arguments:
    number -- angle in degrees

    return the angle in radians
    """
    return (number * math.pi) / 180.0
def number2degree(number):
    """
    Convert an angle from radians to degrees.

    Keyword arguments:
    number -- angle in radians

    return the angle in degrees
    """
    return (number * 180.0) / math.pi
def draw_circle(radius_in_meters, center_point, steps=15):
    """
    Build a circle-shaped Polygon approximated by *steps* straight segments.

    Keyword arguments:
    radius_in_meters -- circle radius in meters
    center_point -- point geojson object marking the circle center
    steps -- number of polygon vertices (values below 15 are raised to 15)

    return a polygon geojson object approximating the circle
    """
    if steps <= 15:
        steps = 15
    # angular distance covered by the radius, in radians
    dist = (radius_in_meters / 1000) / 6371
    rad_lat = number2radius(center_point['coordinates'][1])
    rad_lng = number2radius(center_point['coordinates'][0])
    ring = []
    for step in range(0, steps):
        # bearing of this vertex as seen from the center
        brng = 2 * math.pi * step / steps
        lat = math.asin(math.sin(rad_lat) * math.cos(dist) +
                        math.cos(rad_lat) * math.sin(dist) * math.cos(brng))
        lng = rad_lng + math.atan2(math.sin(brng) * math.sin(dist)
                                   * math.cos(rad_lat), math.cos(dist) - math.sin(rad_lat) * math.sin(lat))
        ring.append([number2degree(lng), number2degree(lat)])
    return {"type": "Polygon", "coordinates": [ring]}
def rectangle_centroid(rectangle):
    """
    get the centroid of an axis-aligned rectangle

    Keyword arguments:
    rectangle -- polygon geojson object whose ring lists the corners in order

    return centroid as a point geojson object
    """
    ring = rectangle['coordinates'][0]
    # opposite corners of the rectangle
    x_min, y_min = ring[0][0], ring[0][1]
    x_max, y_max = ring[2][0], ring[2][1]
    return {'type': 'Point',
            'coordinates': [x_min + (x_max - x_min) / 2, y_min + (y_max - y_min) / 2]}
def point_distance(point1, point2):
    """
    calculate the great-circle (haversine) distance between two points,
    like google map
    reference http://www.movable-type.co.uk/scripts/latlong.html

    Keyword arguments:
    point1 -- point one geojson object
    point2 -- point two geojson object

    return distance in meters
    """
    lng_a = point1['coordinates'][0]
    lat_a = point1['coordinates'][1]
    lng_b = point2['coordinates'][0]
    lat_b = point2['coordinates'][1]
    delta_lat = number2radius(lat_b - lat_a)
    delta_lng = number2radius(lng_b - lng_a)
    # haversine term
    half_chord = math.pow(math.sin(delta_lat / 2), 2) + math.cos(number2radius(lat_a)) * \
        math.cos(number2radius(lat_b)) * math.pow(math.sin(delta_lng / 2), 2)
    # central angle in radians
    angle = 2 * math.atan2(math.sqrt(half_chord), math.sqrt(1 - half_chord))
    # mean Earth radius 6371 km, converted to meters
    return (6371 * angle) * 1000
def point_distance_ellipsode(point1, point2):
    """
    calculate the distance between two points on the ellipsoid, linearized
    around point1 (first-order approximation; accurate for small separations)

    Keyword arguments:
    point1 -- point one geojson object
    point2 -- point two geojson object

    return distance in meters
    """
    # WGS-84-like ellipsoid parameters
    a = 6378137                                # semi-major axis (m)
    f = 1 / 298.25722                          # flattening
    b = a - a * f                              # semi-minor axis
    e = math.sqrt((a * a - b * b) / (a * a))   # first eccentricity
    lon1 = point1['coordinates'][0]
    lat1 = point1['coordinates'][1]
    # BUG FIX: lon2 previously read point1's longitude, so any longitude
    # difference was silently ignored (east-west distance was always 0).
    lon2 = point2['coordinates'][0]
    lat2 = point2['coordinates'][1]
    # meridian (M) and prime-vertical (N) radii of curvature at lat1
    M = a * (1 - e * e) * math.pow(1 - math.pow(e * math.sin(math.radians(lat1)), 2), -1.5)
    N = a / (math.pow(1 - math.pow(e * math.sin(math.radians(lat1)), 2), 0.5))
    distance_lat = M * math.radians(lat2 - lat1)
    distance_lon = N * math.cos(math.radians(lat1)) * (lon2 - lon1) * 3600 * math.sin(1 / 3600 * math.pi / 180)
    return math.sqrt(distance_lat * distance_lat + distance_lon * distance_lon)
def geometry_within_radius(geometry, center, radius):
    """
    Check that a Point / LineString / Polygon lies entirely inside the
    circle of the given radius around *center*.

    Keyword arguments:
    geometry -- point/linestring/polygon geojson object
    center -- point geojson object
    radius -- radius in meters

    if(geometry inside radius) return true else false
    """
    geom_type = geometry['type']
    if geom_type == 'Point':
        return point_distance(geometry, center) <= radius
    elif geom_type == 'LineString' or geom_type == 'Polygon':
        # for a Polygon it is enough to check the exterior ring
        if geom_type == 'Polygon':
            coordinates = geometry['coordinates'][0]
        else:
            coordinates = geometry['coordinates']
        for coordinate in coordinates:
            if point_distance({'coordinates': coordinate}, center) > radius:
                return False
        return True
def area(poly):
    """
    calculate the signed area of a polygon (shoelace formula)

    Keyword arguments:
    poly -- polygon geojson object

    return polygon area (sign depends on ring orientation)
    """
    # TODO: polygon holes at coordinates[1]
    ring = poly['coordinates'][0]
    total = 0
    prev = len(ring) - 1
    for cur in range(0, len(ring)):
        # cross terms of consecutive vertices ([lng, lat] pairs)
        total += ring[cur][1] * ring[prev][0]
        total -= ring[cur][0] * ring[prev][1]
        prev = cur
    return total / 2
def centroid(poly):
    """
    get the centroid of polygon
    adapted from http://paulbourke.net/geometry/polyarea/javascript.txt

    Keyword arguments:
    poly -- polygon geojson object

    return polygon centroid as a point geojson object
    """
    # TODO: polygon holes at coordinates[1]
    ring = poly['coordinates'][0]
    lat_sum = 0
    lng_sum = 0
    prev = len(ring) - 1
    for cur in range(0, len(ring)):
        lat1, lng1 = ring[cur][1], ring[cur][0]
        lat2, lng2 = ring[prev][1], ring[prev][0]
        # per-edge cross product, weights the vertex midpoints below
        cross = lat1 * lng2 - lat2 * lng1
        lat_sum += (lat1 + lat2) * cross
        lng_sum += (lng1 + lng2) * cross
        prev = cur
    six_area = area(poly) * 6
    return {'type': 'Point', 'coordinates': [lng_sum / six_area, lat_sum / six_area]}
def destination_point(point, brng, dist):
    """
    Calculate the destination point reached by travelling *dist* kilometers
    from *point* along bearing *brng*.

    Keyword arguments:
    point -- point geojson object (start position)
    brng -- bearing in degrees
    dist -- distance in Kilometer between destination and base point

    return destination point object
    """
    angular_dist = float(dist) / 6371  # angular distance in radians
    bearing = number2radius(brng)
    lon1 = number2radius(point['coordinates'][0])
    lat1 = number2radius(point['coordinates'][1])
    lat2 = math.asin(math.sin(lat1) * math.cos(angular_dist) +
                     math.cos(lat1) * math.sin(angular_dist) * math.cos(bearing))
    lon2 = lon1 + math.atan2(math.sin(bearing) * math.sin(angular_dist) *
                             math.cos(lat1), math.cos(angular_dist) - math.sin(lat1) * math.sin(lat2))
    # normalise to the -180..+180 degree range
    lon2 = (lon2 + 3 * math.pi) % (2 * math.pi) - math.pi
    return {'type': 'Point', 'coordinates': [number2degree(lon2), number2degree(lat2)]}
def simplify(source, kink=20):
    """
    Simplify a polyline given as geojson Point objects, keeping only kinks
    deeper than the tolerance (Douglas-Peucker style, stack based).

    Keyword arguments:
    source -- list of point geojson objects
    kink -- tolerance in meters; kinks above this depth are kept (kink depth
            is the height of the triangle abc where a-b and b-c are two
            consecutive line segments)

    return a list of point geojson objects describing the simplified line

    Fixes over the previous revision (which crashed under Python 3):
    * ``map`` results were indexed / len()'d       -> build real lists
    * ``o.coordinates`` attribute access on dicts  -> dict subscripting
    * writes to fixed indices of empty lists       -> append / explicit stack
    * the deviation loop read ``source`` instead of the lng/lat projection
    """
    # project to {lng, lat} records once
    source_coord = [{"lng": pt["coordinates"][0], "lat": pt["coordinates"][1]}
                    for pt in source]
    count = len(source_coord)
    if count < 3:
        # one or two points: nothing to simplify
        return [{"type": "Point", "coordinates": [p["lng"], p["lat"]]}
                for p in source_coord]
    F = (math.pi / 180.0) * 0.5
    # convert the metric tolerance into squared degrees
    band_sqr = kink * 360.0 / (2.0 * math.pi * 6378137.0)
    band_sqr *= band_sqr
    index = []                  # indexes of source points kept in the result
    stack = [(0, count - 1)]    # working sections as (start, end) pairs
    while stack:
        start, end = stack.pop()
        if (end - start) > 1:  # any intermediate points?
            # find the most deviant intermediate point relative to the
            # chord joining the section's start and end points
            x12 = source_coord[end]["lng"] - source_coord[start]["lng"]
            y12 = source_coord[end]["lat"] - source_coord[start]["lat"]
            if math.fabs(x12) > 180.0:
                x12 = 360.0 - math.fabs(x12)
            # use average latitude to shrink longitude differences
            x12 *= math.cos(F * (source_coord[end]["lat"] + source_coord[start]["lat"]))
            d12 = (x12 * x12) + (y12 * y12)
            sig = start
            max_dev_sqr = -1.0
            for i in range(start + 1, end):
                x13 = source_coord[i]["lng"] - source_coord[start]["lng"]
                y13 = source_coord[i]["lat"] - source_coord[start]["lat"]
                if math.fabs(x13) > 180.0:
                    x13 = 360.0 - math.fabs(x13)
                x13 *= math.cos(F * (source_coord[i]["lat"] + source_coord[start]["lat"]))
                d13 = (x13 * x13) + (y13 * y13)
                x23 = source_coord[i]["lng"] - source_coord[end]["lng"]
                y23 = source_coord[i]["lat"] - source_coord[end]["lat"]
                if math.fabs(x23) > 180.0:
                    x23 = 360.0 - math.fabs(x23)
                x23 *= math.cos(F * (source_coord[i]["lat"] + source_coord[end]["lat"]))
                d23 = (x23 * x23) + (y23 * y23)
                if d13 >= (d12 + d23):
                    dev_sqr = d23
                elif d23 >= (d12 + d13):
                    dev_sqr = d13
                else:
                    # squared perpendicular distance to the chord
                    dev_sqr = (x13 * y12 - y13 * x12) * (x13 * y12 - y13 * x12) / d12
                if dev_sqr > max_dev_sqr:
                    sig = i
                    max_dev_sqr = dev_sqr
            if max_dev_sqr < band_sqr:
                # no significant intermediate point: keep only the start
                index.append(start)
            else:
                # split at the most deviant point; left half processed first
                stack.append((sig, end))
                stack.append((start, sig))
        else:
            # no intermediate points, so keep the start point
            index.append(start)
    index.append(count - 1)  # always keep the last point
    return [{"type": "Point",
             "coordinates": [source_coord[i]["lng"], source_coord[i]["lat"]]}
            for i in index]
|
brandonxiang/geojson-python-utils | geojson_utils/geojson_utils.py | draw_circle | python | def draw_circle(radius_in_meters, center_point, steps=15):
steps = steps if steps > 15 else 15
center = [center_point['coordinates'][1], center_point['coordinates'][0]]
dist = (radius_in_meters / 1000) / 6371
# convert meters to radiant
rad_center = [number2radius(center[0]), number2radius(center[1])]
# 15 sided circle
poly = []
for step in range(0, steps):
brng = 2 * math.pi * step / steps
lat = math.asin(math.sin(rad_center[0]) * math.cos(dist) +
math.cos(rad_center[0]) * math.sin(dist) * math.cos(brng))
lng = rad_center[1] + math.atan2(math.sin(brng) * math.sin(dist)
* math.cos(rad_center[0]), math.cos(dist) - math.sin(rad_center[0]) * math.sin(lat))
poly.append([number2degree(lng), number2degree(lat)])
return {"type": "Polygon", "coordinates": [poly]} | get a circle shape polygon based on centerPoint and radius
Keyword arguments:
point1 -- point one geojson object
point2 -- point two geojson object
if(point inside multipoly) return true else false | train | https://github.com/brandonxiang/geojson-python-utils/blob/33d0dcd5f16e0567b48c0d49fd292a4f1db16b41/geojson_utils/geojson_utils.py#L170-L194 | [
"def number2radius(number):\n \"\"\"\n convert degree into radius\n\n Keyword arguments:\n number -- degree\n\n return radius\n \"\"\"\n return number * math.pi / 180\n",
"def number2degree(number):\n \"\"\"\n convert radius into degree\n\n Keyword arguments:\n number -- radius\n\... | import math
def linestrings_intersect(line1, line2):
"""
To valid whether linestrings from geojson are intersected with each other.
reference: http://www.kevlindev.com/gui/math/intersection/Intersection.js
Keyword arguments:
line1 -- first line geojson object
line2 -- second line geojson object
if(line1 intersects with other) return intersect point array else empty array
"""
intersects = []
for i in range(0, len(line1['coordinates']) - 1):
for j in range(0, len(line2['coordinates']) - 1):
a1_x = line1['coordinates'][i][1]
a1_y = line1['coordinates'][i][0]
a2_x = line1['coordinates'][i + 1][1]
a2_y = line1['coordinates'][i + 1][0]
b1_x = line2['coordinates'][j][1]
b1_y = line2['coordinates'][j][0]
b2_x = line2['coordinates'][j + 1][1]
b2_y = line2['coordinates'][j + 1][0]
ua_t = (b2_x - b1_x) * (a1_y - b1_y) - \
(b2_y - b1_y) * (a1_x - b1_x)
ub_t = (a2_x - a1_x) * (a1_y - b1_y) - \
(a2_y - a1_y) * (a1_x - b1_x)
u_b = (b2_y - b1_y) * (a2_x - a1_x) - (b2_x - b1_x) * (a2_y - a1_y)
if not u_b == 0:
u_a = ua_t / u_b
u_b = ub_t / u_b
if 0 <= u_a and u_a <= 1 and 0 <= u_b and u_b <= 1:
intersects.append({'type': 'Point', 'coordinates': [
a1_x + u_a * (a2_x - a1_x), a1_y + u_a * (a2_y - a1_y)]})
# if len(intersects) == 0:
# intersects = False
return intersects
def _bbox_around_polycoords(coords):
"""
bounding box
"""
x_all = []
y_all = []
for first in coords[0]:
x_all.append(first[1])
y_all.append(first[0])
return [min(x_all), min(y_all), max(x_all), max(y_all)]
def _point_in_bbox(point, bounds):
"""
valid whether the point is inside the bounding box
"""
return not(point['coordinates'][1] < bounds[0] or point['coordinates'][1] > bounds[2]
or point['coordinates'][0] < bounds[1] or point['coordinates'][0] > bounds[3])
def _pnpoly(x, y, coords):
"""
the algorithm to judge whether the point is located in polygon
reference: https://www.ecse.rpi.edu/~wrf/Research/Short_Notes/pnpoly.html#Explanation
"""
vert = [[0, 0]]
for coord in coords:
for node in coord:
vert.append(node)
vert.append(coord[0])
vert.append([0, 0])
inside = False
i = 0
j = len(vert) - 1
while i < len(vert):
if ((vert[i][0] > y) != (vert[j][0] > y)) and (x < (vert[j][1] - vert[i][1])
* (y - vert[i][0]) / (vert[j][0] - vert[i][0]) + vert[i][1]):
inside = not inside
j = i
i += 1
return inside
def _point_in_polygon(point, coords):
inside_box = False
for coord in coords:
if inside_box:
break
if _point_in_bbox(point, _bbox_around_polycoords(coord)):
inside_box = True
if not inside_box:
return False
inside_poly = False
for coord in coords:
if inside_poly:
break
if _pnpoly(point['coordinates'][1], point['coordinates'][0], coord):
inside_poly = True
return inside_poly
def point_in_polygon(point, poly):
"""
valid whether the point is located in a polygon
Keyword arguments:
point -- point geojson object
poly -- polygon geojson object
if(point inside poly) return true else false
"""
coords = [poly['coordinates']] if poly[
'type'] == 'Polygon' else poly['coordinates']
return _point_in_polygon(point, coords)
def point_in_multipolygon(point, multipoly):
"""
valid whether the point is located in a mulitpolygon (donut polygon is not supported)
Keyword arguments:
point -- point geojson object
multipoly -- multipolygon geojson object
if(point inside multipoly) return true else false
"""
coords_array = [multipoly['coordinates']] if multipoly[
'type'] == "MultiPolygon" else multipoly['coordinates']
for coords in coords_array:
if _point_in_polygon(point, coords):
return True
return False
def number2radius(number):
"""
convert degree into radius
Keyword arguments:
number -- degree
return radius
"""
return number * math.pi / 180
def number2degree(number):
"""
convert radius into degree
Keyword arguments:
number -- radius
return degree
"""
return number * 180 / math.pi
def rectangle_centroid(rectangle):
"""
get the centroid of the rectangle
Keyword arguments:
rectangle -- polygon geojson object
return centroid
"""
bbox = rectangle['coordinates'][0]
xmin = bbox[0][0]
ymin = bbox[0][1]
xmax = bbox[2][0]
ymax = bbox[2][1]
xwidth = xmax - xmin
ywidth = ymax - ymin
return {'type': 'Point', 'coordinates': [xmin + xwidth / 2, ymin + ywidth / 2]}
def point_distance(point1, point2):
"""
calculate the distance between two points on the sphere like google map
reference http://www.movable-type.co.uk/scripts/latlong.html
Keyword arguments:
point1 -- point one geojson object
point2 -- point two geojson object
return distance
"""
lon1 = point1['coordinates'][0]
lat1 = point1['coordinates'][1]
lon2 = point2['coordinates'][0]
lat2 = point2['coordinates'][1]
deg_lat = number2radius(lat2 - lat1)
deg_lon = number2radius(lon2 - lon1)
a = math.pow(math.sin(deg_lat / 2), 2) + math.cos(number2radius(lat1)) * \
math.cos(number2radius(lat2)) * math.pow(math.sin(deg_lon / 2), 2)
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
return (6371 * c) * 1000
def point_distance_ellipsode(point1, point2):
    """
    calculate the distance between two points on the ellipsoid, linearized
    around point1 (first-order approximation; accurate for small separations)

    Keyword arguments:
    point1 -- point one geojson object
    point2 -- point two geojson object

    return distance in meters
    """
    # WGS-84-like ellipsoid parameters
    a = 6378137                                # semi-major axis (m)
    f = 1 / 298.25722                          # flattening
    b = a - a * f                              # semi-minor axis
    e = math.sqrt((a * a - b * b) / (a * a))   # first eccentricity
    lon1 = point1['coordinates'][0]
    lat1 = point1['coordinates'][1]
    # BUG FIX: lon2 previously read point1's longitude, so any longitude
    # difference was silently ignored (east-west distance was always 0).
    lon2 = point2['coordinates'][0]
    lat2 = point2['coordinates'][1]
    # meridian (M) and prime-vertical (N) radii of curvature at lat1
    M = a * (1 - e * e) * math.pow(1 - math.pow(e * math.sin(math.radians(lat1)), 2), -1.5)
    N = a / (math.pow(1 - math.pow(e * math.sin(math.radians(lat1)), 2), 0.5))
    distance_lat = M * math.radians(lat2 - lat1)
    distance_lon = N * math.cos(math.radians(lat1)) * (lon2 - lon1) * 3600 * math.sin(1 / 3600 * math.pi / 180)
    return math.sqrt(distance_lat * distance_lat + distance_lon * distance_lon)
def geometry_within_radius(geometry, center, radius):
"""
To valid whether point or linestring or polygon is inside a radius around a center
Keyword arguments:
geometry -- point/linstring/polygon geojson object
center -- point geojson object
radius -- radius
if(geometry inside radius) return true else false
"""
if geometry['type'] == 'Point':
return point_distance(geometry, center) <= radius
elif geometry['type'] == 'LineString' or geometry['type'] == 'Polygon':
point = {}
# it's enough to check the exterior ring of the Polygon
coordinates = geometry['coordinates'][0] if geometry['type'] == 'Polygon' else geometry['coordinates']
for coordinate in coordinates:
point['coordinates'] = coordinate
if point_distance(point, center) > radius:
return False
return True
def area(poly):
"""
calculate the area of polygon
Keyword arguments:
poly -- polygon geojson object
return polygon area
"""
poly_area = 0
# TODO: polygon holes at coordinates[1]
points = poly['coordinates'][0]
j = len(points) - 1
count = len(points)
for i in range(0, count):
p1_x = points[i][1]
p1_y = points[i][0]
p2_x = points[j][1]
p2_y = points[j][0]
poly_area += p1_x * p2_y
poly_area -= p1_y * p2_x
j = i
poly_area /= 2
return poly_area
def centroid(poly):
"""
get the centroid of polygon
adapted from http://paulbourke.net/geometry/polyarea/javascript.txt
Keyword arguments:
poly -- polygon geojson object
return polygon centroid
"""
f_total = 0
x_total = 0
y_total = 0
# TODO: polygon holes at coordinates[1]
points = poly['coordinates'][0]
j = len(points) - 1
count = len(points)
for i in range(0, count):
p1_x = points[i][1]
p1_y = points[i][0]
p2_x = points[j][1]
p2_y = points[j][0]
f_total = p1_x * p2_y - p2_x * p1_y
x_total += (p1_x + p2_x) * f_total
y_total += (p1_y + p2_y) * f_total
j = i
six_area = area(poly) * 6
return {'type': 'Point', 'coordinates': [y_total / six_area, x_total / six_area]}
def destination_point(point, brng, dist):
"""
Calculate a destination Point base on a base point and a distance
Keyword arguments:
pt -- polygon geojson object
brng -- an angle in degrees
dist -- distance in Kilometer between destination and base point
return destination point object
"""
dist = float(dist) / 6371 # convert dist to angular distance in radians
brng = number2radius(brng)
lon1 = number2radius(point['coordinates'][0])
lat1 = number2radius(point['coordinates'][1])
lat2 = math.asin(math.sin(lat1) * math.cos(dist) +
math.cos(lat1) * math.sin(dist) * math.cos(brng))
lon2 = lon1 + math.atan2(math.sin(brng) * math.sin(dist) *
math.cos(lat1), math.cos(dist) - math.sin(lat1) * math.sin(lat2))
lon2 = (lon2 + 3 * math.pi) % (2 * math.pi) - math.pi # normalise to -180 degree +180 degree
return {'type': 'Point', 'coordinates': [number2degree(lon2), number2degree(lat2)]}
def simplify(source, kink=20):
"""
source[] array of geojson points
kink in metres, kinks above this depth kept
kink depth is the height of the triangle abc where a-b and b-c are two consecutive line segments
"""
source_coord = map(lambda o: {"lng": o.coordinates[0], "lat": o.coordinates[1]}, source)
# count, n_stack, n_dest, start, end, i, sig;
# dev_sqr, max_dev_sqr, band_sqr;
# x12, y12, d12, x13, y13, d13, x23, y23, d23;
F = (math.pi / 180.0) * 0.5
index = [] # aray of indexes of source points to include in the reduced line
sig_start = [] # indices of start & end of working section
sig_end = []
# check for simple cases
count = len(source_coord)
if count < 3:
return source_coord # one or two points
# more complex case. initialize stack
band_sqr = kink * 360.0 / (2.0 * math.pi * 6378137.0) # Now in degrees
band_sqr *= band_sqr
n_dest = 0
sig_start[0] = 0
sig_end[0] = count - 1
n_stack = 1
# while the stack is not empty
while n_stack > 0:
# ... pop the top-most entries off the stacks
start = sig_start[n_stack - 1]
end = sig_end[n_stack - 1]
n_stack -= 1
if (end - start) > 1: #any intermediate points ?
# ... yes, so find most deviant intermediate point to either side of line joining start & end points
x12 = source[end]["lng"] - source[start]["lng"]
y12 = source[end]["lat"] - source[start]["lat"]
if math.fabs(x12) > 180.0:
x12 = 360.0 - math.fabs(x12)
x12 *= math.cos(F * (source[end]["lat"] + source[start]["lat"])) # use avg lat to reduce lng
d12 = (x12 * x12) + (y12 * y12)
i = start + 1
sig = start
max_dev_sqr = -1.0
while i < end:
x13 = source[i]["lng"] - source[start]["lng"]
y13 = source[i]["lat"] - source[start]["lat"]
if math.fabs(x13) > 180.0:
x13 = 360.0 - math.fabs(x13)
x13 *= math.cos(F * (source[i]["lat"] + source[start]["lat"]))
d13 = (x13 * x13) + (y13 * y13)
x23 = source[i]["lng"] - source[end]["lng"]
y23 = source[i]["lat"] - source[end]["lat"]
if math.fabs(x23) > 180.0:
x23 = 360.0 - math.fabs(x23)
x23 *= math.cos(F * (source[i]["lat"] + source[end]["lat"]))
d23 = (x23 * x23) + (y23 * y23)
if d13 >= (d12 + d23):
dev_sqr = d23
elif d23 >= (d12 + d13):
dev_sqr = d13
else:
dev_sqr = (x13 * y12 - y13 * x12) * (x13 * y12 - y13 * x12) / d12 # solve triangle
if dev_sqr > max_dev_sqr:
sig = i
max_dev_sqr = dev_sqr
i += 1
if max_dev_sqr < band_sqr: # is there a sig. intermediate point ?
#... no, so transfer current start point
index[n_dest] = start
n_dest += 1
else: # ... yes, so push two sub-sections on stack for further processing
n_stack += 1
sig_start[n_stack - 1] = sig
sig_end[n_stack - 1] = end
n_stack += 1
sig_start[n_stack - 1] = start
sig_end[n_stack - 1] = sig
else: # ... no intermediate points, so transfer current start point
index[n_dest] = start
n_dest += 1
# transfer last point
index[n_dest] = count - 1
n_dest += 1
# make return array
r = []
for i in range(0, n_dest):
r.append(source_coord[index[i]])
return map(lambda o: {"type": "Point","coordinates": [o.lng, o.lat]}, r)
|
brandonxiang/geojson-python-utils | geojson_utils/geojson_utils.py | rectangle_centroid | python | def rectangle_centroid(rectangle):
bbox = rectangle['coordinates'][0]
xmin = bbox[0][0]
ymin = bbox[0][1]
xmax = bbox[2][0]
ymax = bbox[2][1]
xwidth = xmax - xmin
ywidth = ymax - ymin
return {'type': 'Point', 'coordinates': [xmin + xwidth / 2, ymin + ywidth / 2]} | get the centroid of the rectangle
Keyword arguments:
rectangle -- polygon geojson object
return centroid | train | https://github.com/brandonxiang/geojson-python-utils/blob/33d0dcd5f16e0567b48c0d49fd292a4f1db16b41/geojson_utils/geojson_utils.py#L197-L213 | null | import math
def linestrings_intersect(line1, line2):
"""
To valid whether linestrings from geojson are intersected with each other.
reference: http://www.kevlindev.com/gui/math/intersection/Intersection.js
Keyword arguments:
line1 -- first line geojson object
line2 -- second line geojson object
if(line1 intersects with other) return intersect point array else empty array
"""
intersects = []
for i in range(0, len(line1['coordinates']) - 1):
for j in range(0, len(line2['coordinates']) - 1):
a1_x = line1['coordinates'][i][1]
a1_y = line1['coordinates'][i][0]
a2_x = line1['coordinates'][i + 1][1]
a2_y = line1['coordinates'][i + 1][0]
b1_x = line2['coordinates'][j][1]
b1_y = line2['coordinates'][j][0]
b2_x = line2['coordinates'][j + 1][1]
b2_y = line2['coordinates'][j + 1][0]
ua_t = (b2_x - b1_x) * (a1_y - b1_y) - \
(b2_y - b1_y) * (a1_x - b1_x)
ub_t = (a2_x - a1_x) * (a1_y - b1_y) - \
(a2_y - a1_y) * (a1_x - b1_x)
u_b = (b2_y - b1_y) * (a2_x - a1_x) - (b2_x - b1_x) * (a2_y - a1_y)
if not u_b == 0:
u_a = ua_t / u_b
u_b = ub_t / u_b
if 0 <= u_a and u_a <= 1 and 0 <= u_b and u_b <= 1:
intersects.append({'type': 'Point', 'coordinates': [
a1_x + u_a * (a2_x - a1_x), a1_y + u_a * (a2_y - a1_y)]})
# if len(intersects) == 0:
# intersects = False
return intersects
def _bbox_around_polycoords(coords):
"""
bounding box
"""
x_all = []
y_all = []
for first in coords[0]:
x_all.append(first[1])
y_all.append(first[0])
return [min(x_all), min(y_all), max(x_all), max(y_all)]
def _point_in_bbox(point, bounds):
"""
valid whether the point is inside the bounding box
"""
return not(point['coordinates'][1] < bounds[0] or point['coordinates'][1] > bounds[2]
or point['coordinates'][0] < bounds[1] or point['coordinates'][0] > bounds[3])
def _pnpoly(x, y, coords):
"""
the algorithm to judge whether the point is located in polygon
reference: https://www.ecse.rpi.edu/~wrf/Research/Short_Notes/pnpoly.html#Explanation
"""
vert = [[0, 0]]
for coord in coords:
for node in coord:
vert.append(node)
vert.append(coord[0])
vert.append([0, 0])
inside = False
i = 0
j = len(vert) - 1
while i < len(vert):
if ((vert[i][0] > y) != (vert[j][0] > y)) and (x < (vert[j][1] - vert[i][1])
* (y - vert[i][0]) / (vert[j][0] - vert[i][0]) + vert[i][1]):
inside = not inside
j = i
i += 1
return inside
def _point_in_polygon(point, coords):
    # Cheap rejection first: the point must fall inside at least one
    # ring's bounding box before the exact pnpoly test is run.
    if not any(_point_in_bbox(point, _bbox_around_polycoords(ring))
               for ring in coords):
        return False
    lon = point['coordinates'][0]
    lat = point['coordinates'][1]
    return any(_pnpoly(lat, lon, ring) for ring in coords)
def point_in_polygon(point, poly):
    """
    valid whether the point is located in a polygon

    Keyword arguments:
    point -- point geojson object
    poly -- polygon geojson object

    if(point inside poly) return true else false
    """
    if poly['type'] == 'Polygon':
        coords = [poly['coordinates']]
    else:
        coords = poly['coordinates']
    return _point_in_polygon(point, coords)
def point_in_multipolygon(point, multipoly):
    """
    valid whether the point is located in a mulitpolygon
    (donut polygon is not supported)

    Keyword arguments:
    point -- point geojson object
    multipoly -- multipolygon geojson object

    if(point inside multipoly) return true else false
    """
    if multipoly['type'] == "MultiPolygon":
        coords_array = [multipoly['coordinates']]
    else:
        coords_array = multipoly['coordinates']
    return any(_point_in_polygon(point, coords) for coords in coords_array)
def number2radius(number):
    """
    Convert an angle in degrees to radians.

    Keyword arguments:
    number -- angle in degrees

    return the angle in radians

    Uses the standard-library math.radians instead of the hand-rolled
    ``number * pi / 180`` (same value up to one ulp).
    """
    return math.radians(number)
def number2degree(number):
    """
    Convert an angle in radians to degrees.

    Keyword arguments:
    number -- angle in radians

    return the angle in degrees

    Uses the standard-library math.degrees instead of the hand-rolled
    ``number * 180 / pi`` (same value up to one ulp).
    """
    return math.degrees(number)
def draw_circle(radius_in_meters, center_point, steps=15):
    """
    Build a circle-shaped Polygon around a center point.

    (The previous docstring was a copy-paste error describing two-point
    parameters this function does not have.)

    Keyword arguments:
    radius_in_meters -- circle radius in meters
    center_point -- center Point geojson object ([lon, lat])
    steps -- number of polygon vertices (a minimum of 15 is enforced)

    return a Polygon geojson object approximating the circle
    """
    steps = max(steps, 15)  # enforce a sane minimum resolution
    center = [center_point['coordinates'][1], center_point['coordinates'][0]]
    dist = (radius_in_meters / 1000) / 6371  # meters -> angular distance in radians
    rad_center = [number2radius(center[0]), number2radius(center[1])]
    poly = []
    for step in range(0, steps):
        brng = 2 * math.pi * step / steps  # bearing of this vertex
        lat = math.asin(math.sin(rad_center[0]) * math.cos(dist) +
                        math.cos(rad_center[0]) * math.sin(dist) * math.cos(brng))
        lng = rad_center[1] + math.atan2(
            math.sin(brng) * math.sin(dist) * math.cos(rad_center[0]),
            math.cos(dist) - math.sin(rad_center[0]) * math.sin(lat))
        poly.append([number2degree(lng), number2degree(lat)])
    return {"type": "Polygon", "coordinates": [poly]}
def point_distance(point1, point2):
    """
    Haversine great-circle distance between two points, in meters.
    reference http://www.movable-type.co.uk/scripts/latlong.html

    Keyword arguments:
    point1 -- point one geojson object
    point2 -- point two geojson object

    return distance in meters
    """
    lon1, lat1 = point1['coordinates'][0], point1['coordinates'][1]
    lon2, lat2 = point2['coordinates'][0], point2['coordinates'][1]
    d_lat = number2radius(lat2 - lat1)
    d_lon = number2radius(lon2 - lon1)
    # haversine formula
    h = math.pow(math.sin(d_lat / 2), 2) + \
        math.cos(number2radius(lat1)) * math.cos(number2radius(lat2)) * \
        math.pow(math.sin(d_lon / 2), 2)
    angle = 2 * math.atan2(math.sqrt(h), math.sqrt(1 - h))
    return (6371 * angle) * 1000
def point_distance_ellipsode(point1, point2):
    """
    Approximate distance in meters between two nearby points on the
    WGS-84 ellipsoid, using the meridian (M) and prime-vertical (N)
    radii of curvature evaluated at point1.

    Keyword arguments:
    point1 -- point one geojson object (base point)
    point2 -- point two geojson object

    return distance in meters
    """
    a = 6378137          # semi-major axis (m)
    f = 1 / 298.25722    # flattening
    b = a - a * f        # semi-minor axis
    e = math.sqrt((a * a - b * b) / (a * a))  # first eccentricity
    lon1 = point1['coordinates'][0]
    lat1 = point1['coordinates'][1]
    # BUG FIX: lon2 was previously read from point1, which zeroed the
    # east-west component of every computed distance.
    lon2 = point2['coordinates'][0]
    lat2 = point2['coordinates'][1]
    M = a * (1 - e * e) * math.pow(1 - math.pow(e * math.sin(number2radius(lat1)), 2), -1.5)
    N = a / (math.pow(1 - math.pow(e * math.sin(number2radius(lat1)), 2), 0.5))
    distance_lat = M * number2radius(lat2 - lat1)
    distance_lon = N * math.cos(number2radius(lat1)) * (lon2 - lon1) * 3600 * math.sin(1 / 3600 * math.pi / 180)
    return math.sqrt(distance_lat * distance_lat + distance_lon * distance_lon)
def geometry_within_radius(geometry, center, radius):
    """
    Check whether a Point, LineString or Polygon lies entirely within
    *radius* of *center*.

    Keyword arguments:
    geometry -- point/linstring/polygon geojson object
    center -- point geojson object
    radius -- radius (same unit as point_distance, i.e. meters)

    if(geometry inside radius) return true else false
    """
    geo_type = geometry['type']
    if geo_type == 'Point':
        return point_distance(geometry, center) <= radius
    if geo_type in ('LineString', 'Polygon'):
        # checking the exterior ring is sufficient for a Polygon
        if geo_type == 'Polygon':
            coordinates = geometry['coordinates'][0]
        else:
            coordinates = geometry['coordinates']
        probe = {}
        for coordinate in coordinates:
            probe['coordinates'] = coordinate
            if point_distance(probe, center) > radius:
                return False
        return True
def area(poly):
    """
    Signed shoelace area of the polygon's exterior ring.

    Keyword arguments:
    poly -- polygon geojson object

    return polygon area (sign depends on ring orientation)
    """
    # TODO: polygon holes at coordinates[1]
    ring = poly['coordinates'][0]
    total = 0
    prev = len(ring) - 1
    for cur in range(len(ring)):
        cur_lat, cur_lon = ring[cur][1], ring[cur][0]
        prev_lat, prev_lon = ring[prev][1], ring[prev][0]
        total += cur_lat * prev_lon - cur_lon * prev_lat
        prev = cur
    return total / 2
def centroid(poly):
    """
    Centroid of the polygon's exterior ring.
    adapted from http://paulbourke.net/geometry/polyarea/javascript.txt

    Keyword arguments:
    poly -- polygon geojson object

    return Point geojson object at the polygon centroid
    """
    # TODO: polygon holes at coordinates[1]
    ring = poly['coordinates'][0]
    x_sum = 0
    y_sum = 0
    prev = len(ring) - 1
    for cur in range(len(ring)):
        cur_lat, cur_lon = ring[cur][1], ring[cur][0]
        prev_lat, prev_lon = ring[prev][1], ring[prev][0]
        cross = cur_lat * prev_lon - prev_lat * cur_lon
        x_sum += (cur_lat + prev_lat) * cross
        y_sum += (cur_lon + prev_lon) * cross
        prev = cur
    six_area = area(poly) * 6
    return {'type': 'Point', 'coordinates': [y_sum / six_area, x_sum / six_area]}
def destination_point(point, brng, dist):
    """
    Destination point reached from *point* on bearing *brng* after
    travelling *dist* kilometers along a great circle.

    Keyword arguments:
    point -- starting Point geojson object
    brng -- bearing angle in degrees
    dist -- distance in Kilometer between destination and base point

    return destination Point geojson object
    """
    ang_dist = float(dist) / 6371  # km -> angular distance in radians
    bearing = number2radius(brng)
    lon1 = number2radius(point['coordinates'][0])
    lat1 = number2radius(point['coordinates'][1])
    lat2 = math.asin(math.sin(lat1) * math.cos(ang_dist) +
                     math.cos(lat1) * math.sin(ang_dist) * math.cos(bearing))
    lon2 = lon1 + math.atan2(
        math.sin(bearing) * math.sin(ang_dist) * math.cos(lat1),
        math.cos(ang_dist) - math.sin(lat1) * math.sin(lat2))
    # normalise to -180 degree +180 degree
    lon2 = (lon2 + 3 * math.pi) % (2 * math.pi) - math.pi
    return {'type': 'Point', 'coordinates': [number2degree(lon2), number2degree(lat2)]}
def simplify(source, kink=20):
    """
    Douglas-Peucker-style simplification of a line of geojson points.

    source -- list of Point geojson objects describing the line
    kink -- tolerance in metres; kinks deeper than this are kept
            (kink depth is the height of the triangle abc where a-b and
            b-c are two consecutive line segments)

    Returns a list of Point geojson objects for the reduced line.

    Fixes over the original port, which could not run on Python 3:
    out-of-range index assignment into empty lists (``sig_start[0] = 0``,
    ``index[n_dest] = ...`` raised IndexError), ``len()`` on a lazy
    ``map`` object, attribute access (``o.lng``) on plain dicts, and
    reading ``source[end]["lng"]`` from the raw geojson points instead
    of the converted coordinate list.
    """
    pts = [{"lng": p["coordinates"][0], "lat": p["coordinates"][1]} for p in source]
    count = len(pts)
    if count < 3:  # one or two points: nothing to reduce
        return [{"type": "Point", "coordinates": [p["lng"], p["lat"]]} for p in pts]

    half_rad = (math.pi / 180.0) * 0.5
    band_sqr = kink * 360.0 / (2.0 * math.pi * 6378137.0)  # tolerance in degrees
    band_sqr *= band_sqr

    keep = []            # indices of source points kept, in order
    stack_start = [0]    # working sections as parallel start/end stacks
    stack_end = [count - 1]
    while stack_start:
        # pop the top-most section off the stacks
        start = stack_start.pop()
        end = stack_end.pop()
        if end - start > 1:
            # find the most deviant intermediate point relative to the
            # chord joining the section's start and end points
            x12 = pts[end]["lng"] - pts[start]["lng"]
            y12 = pts[end]["lat"] - pts[start]["lat"]
            if math.fabs(x12) > 180.0:
                x12 = 360.0 - math.fabs(x12)
            x12 *= math.cos(half_rad * (pts[end]["lat"] + pts[start]["lat"]))  # avg lat shrinks lng
            d12 = x12 * x12 + y12 * y12
            sig = start
            max_dev_sqr = -1.0
            for i in range(start + 1, end):
                x13 = pts[i]["lng"] - pts[start]["lng"]
                y13 = pts[i]["lat"] - pts[start]["lat"]
                if math.fabs(x13) > 180.0:
                    x13 = 360.0 - math.fabs(x13)
                x13 *= math.cos(half_rad * (pts[i]["lat"] + pts[start]["lat"]))
                d13 = x13 * x13 + y13 * y13
                x23 = pts[i]["lng"] - pts[end]["lng"]
                y23 = pts[i]["lat"] - pts[end]["lat"]
                if math.fabs(x23) > 180.0:
                    x23 = 360.0 - math.fabs(x23)
                x23 *= math.cos(half_rad * (pts[i]["lat"] + pts[end]["lat"]))
                d23 = x23 * x23 + y23 * y23
                if d13 >= d12 + d23:
                    dev_sqr = d23
                elif d23 >= d12 + d13:
                    dev_sqr = d13
                else:
                    cross = x13 * y12 - y13 * x12
                    dev_sqr = cross * cross / d12  # triangle height squared
                if dev_sqr > max_dev_sqr:
                    sig = i
                    max_dev_sqr = dev_sqr
            if max_dev_sqr < band_sqr:
                # no significant intermediate point: keep only the start
                keep.append(start)
            else:
                # split at the deviant point; push right half first so the
                # left half is processed first and indices stay ordered
                stack_start.append(sig)
                stack_end.append(end)
                stack_start.append(start)
                stack_end.append(sig)
        else:
            # no intermediate points: keep the start point
            keep.append(start)
    keep.append(count - 1)  # the last point is always kept
    return [{"type": "Point", "coordinates": [pts[i]["lng"], pts[i]["lat"]]}
            for i in keep]
|
brandonxiang/geojson-python-utils | geojson_utils/geojson_utils.py | point_distance | python | def point_distance(point1, point2):
lon1 = point1['coordinates'][0]
lat1 = point1['coordinates'][1]
lon2 = point2['coordinates'][0]
lat2 = point2['coordinates'][1]
deg_lat = number2radius(lat2 - lat1)
deg_lon = number2radius(lon2 - lon1)
a = math.pow(math.sin(deg_lat / 2), 2) + math.cos(number2radius(lat1)) * \
math.cos(number2radius(lat2)) * math.pow(math.sin(deg_lon / 2), 2)
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
return (6371 * c) * 1000 | calculate the distance between two points on the sphere like google map
reference http://www.movable-type.co.uk/scripts/latlong.html
Keyword arguments:
point1 -- point one geojson object
point2 -- point two geojson object
return distance | train | https://github.com/brandonxiang/geojson-python-utils/blob/33d0dcd5f16e0567b48c0d49fd292a4f1db16b41/geojson_utils/geojson_utils.py#L216-L236 | [
"def number2radius(number):\n \"\"\"\n convert degree into radius\n\n Keyword arguments:\n number -- degree\n\n return radius\n \"\"\"\n return number * math.pi / 180\n"
] | import math
def linestrings_intersect(line1, line2):
"""
To valid whether linestrings from geojson are intersected with each other.
reference: http://www.kevlindev.com/gui/math/intersection/Intersection.js
Keyword arguments:
line1 -- first line geojson object
line2 -- second line geojson object
if(line1 intersects with other) return intersect point array else empty array
"""
intersects = []
for i in range(0, len(line1['coordinates']) - 1):
for j in range(0, len(line2['coordinates']) - 1):
a1_x = line1['coordinates'][i][1]
a1_y = line1['coordinates'][i][0]
a2_x = line1['coordinates'][i + 1][1]
a2_y = line1['coordinates'][i + 1][0]
b1_x = line2['coordinates'][j][1]
b1_y = line2['coordinates'][j][0]
b2_x = line2['coordinates'][j + 1][1]
b2_y = line2['coordinates'][j + 1][0]
ua_t = (b2_x - b1_x) * (a1_y - b1_y) - \
(b2_y - b1_y) * (a1_x - b1_x)
ub_t = (a2_x - a1_x) * (a1_y - b1_y) - \
(a2_y - a1_y) * (a1_x - b1_x)
u_b = (b2_y - b1_y) * (a2_x - a1_x) - (b2_x - b1_x) * (a2_y - a1_y)
if not u_b == 0:
u_a = ua_t / u_b
u_b = ub_t / u_b
if 0 <= u_a and u_a <= 1 and 0 <= u_b and u_b <= 1:
intersects.append({'type': 'Point', 'coordinates': [
a1_x + u_a * (a2_x - a1_x), a1_y + u_a * (a2_y - a1_y)]})
# if len(intersects) == 0:
# intersects = False
return intersects
def _bbox_around_polycoords(coords):
"""
bounding box
"""
x_all = []
y_all = []
for first in coords[0]:
x_all.append(first[1])
y_all.append(first[0])
return [min(x_all), min(y_all), max(x_all), max(y_all)]
def _point_in_bbox(point, bounds):
"""
valid whether the point is inside the bounding box
"""
return not(point['coordinates'][1] < bounds[0] or point['coordinates'][1] > bounds[2]
or point['coordinates'][0] < bounds[1] or point['coordinates'][0] > bounds[3])
def _pnpoly(x, y, coords):
"""
the algorithm to judge whether the point is located in polygon
reference: https://www.ecse.rpi.edu/~wrf/Research/Short_Notes/pnpoly.html#Explanation
"""
vert = [[0, 0]]
for coord in coords:
for node in coord:
vert.append(node)
vert.append(coord[0])
vert.append([0, 0])
inside = False
i = 0
j = len(vert) - 1
while i < len(vert):
if ((vert[i][0] > y) != (vert[j][0] > y)) and (x < (vert[j][1] - vert[i][1])
* (y - vert[i][0]) / (vert[j][0] - vert[i][0]) + vert[i][1]):
inside = not inside
j = i
i += 1
return inside
def _point_in_polygon(point, coords):
inside_box = False
for coord in coords:
if inside_box:
break
if _point_in_bbox(point, _bbox_around_polycoords(coord)):
inside_box = True
if not inside_box:
return False
inside_poly = False
for coord in coords:
if inside_poly:
break
if _pnpoly(point['coordinates'][1], point['coordinates'][0], coord):
inside_poly = True
return inside_poly
def point_in_polygon(point, poly):
"""
valid whether the point is located in a polygon
Keyword arguments:
point -- point geojson object
poly -- polygon geojson object
if(point inside poly) return true else false
"""
coords = [poly['coordinates']] if poly[
'type'] == 'Polygon' else poly['coordinates']
return _point_in_polygon(point, coords)
def point_in_multipolygon(point, multipoly):
"""
valid whether the point is located in a mulitpolygon (donut polygon is not supported)
Keyword arguments:
point -- point geojson object
multipoly -- multipolygon geojson object
if(point inside multipoly) return true else false
"""
coords_array = [multipoly['coordinates']] if multipoly[
'type'] == "MultiPolygon" else multipoly['coordinates']
for coords in coords_array:
if _point_in_polygon(point, coords):
return True
return False
def number2radius(number):
"""
convert degree into radius
Keyword arguments:
number -- degree
return radius
"""
return number * math.pi / 180
def number2degree(number):
"""
convert radius into degree
Keyword arguments:
number -- radius
return degree
"""
return number * 180 / math.pi
def draw_circle(radius_in_meters, center_point, steps=15):
"""
get a circle shape polygon based on centerPoint and radius
Keyword arguments:
point1 -- point one geojson object
point2 -- point two geojson object
if(point inside multipoly) return true else false
"""
steps = steps if steps > 15 else 15
center = [center_point['coordinates'][1], center_point['coordinates'][0]]
dist = (radius_in_meters / 1000) / 6371
# convert meters to radiant
rad_center = [number2radius(center[0]), number2radius(center[1])]
# 15 sided circle
poly = []
for step in range(0, steps):
brng = 2 * math.pi * step / steps
lat = math.asin(math.sin(rad_center[0]) * math.cos(dist) +
math.cos(rad_center[0]) * math.sin(dist) * math.cos(brng))
lng = rad_center[1] + math.atan2(math.sin(brng) * math.sin(dist)
* math.cos(rad_center[0]), math.cos(dist) - math.sin(rad_center[0]) * math.sin(lat))
poly.append([number2degree(lng), number2degree(lat)])
return {"type": "Polygon", "coordinates": [poly]}
def rectangle_centroid(rectangle):
"""
get the centroid of the rectangle
Keyword arguments:
rectangle -- polygon geojson object
return centroid
"""
bbox = rectangle['coordinates'][0]
xmin = bbox[0][0]
ymin = bbox[0][1]
xmax = bbox[2][0]
ymax = bbox[2][1]
xwidth = xmax - xmin
ywidth = ymax - ymin
return {'type': 'Point', 'coordinates': [xmin + xwidth / 2, ymin + ywidth / 2]}
def point_distance_ellipsode(point1, point2):
    """
    Approximate distance in meters between two nearby points on the
    WGS-84 ellipsoid, using the meridian (M) and prime-vertical (N)
    radii of curvature evaluated at point1.

    Keyword arguments:
    point1 -- point one geojson object (base point)
    point2 -- point two geojson object

    return distance in meters
    """
    a = 6378137          # semi-major axis (m)
    f = 1 / 298.25722    # flattening
    b = a - a * f        # semi-minor axis
    e = math.sqrt((a * a - b * b) / (a * a))  # first eccentricity
    lon1 = point1['coordinates'][0]
    lat1 = point1['coordinates'][1]
    # BUG FIX: lon2 was previously read from point1, which zeroed the
    # east-west component of every computed distance.
    lon2 = point2['coordinates'][0]
    lat2 = point2['coordinates'][1]
    M = a * (1 - e * e) * math.pow(1 - math.pow(e * math.sin(number2radius(lat1)), 2), -1.5)
    N = a / (math.pow(1 - math.pow(e * math.sin(number2radius(lat1)), 2), 0.5))
    distance_lat = M * number2radius(lat2 - lat1)
    distance_lon = N * math.cos(number2radius(lat1)) * (lon2 - lon1) * 3600 * math.sin(1 / 3600 * math.pi / 180)
    return math.sqrt(distance_lat * distance_lat + distance_lon * distance_lon)
def geometry_within_radius(geometry, center, radius):
"""
To valid whether point or linestring or polygon is inside a radius around a center
Keyword arguments:
geometry -- point/linstring/polygon geojson object
center -- point geojson object
radius -- radius
if(geometry inside radius) return true else false
"""
if geometry['type'] == 'Point':
return point_distance(geometry, center) <= radius
elif geometry['type'] == 'LineString' or geometry['type'] == 'Polygon':
point = {}
# it's enough to check the exterior ring of the Polygon
coordinates = geometry['coordinates'][0] if geometry['type'] == 'Polygon' else geometry['coordinates']
for coordinate in coordinates:
point['coordinates'] = coordinate
if point_distance(point, center) > radius:
return False
return True
def area(poly):
"""
calculate the area of polygon
Keyword arguments:
poly -- polygon geojson object
return polygon area
"""
poly_area = 0
# TODO: polygon holes at coordinates[1]
points = poly['coordinates'][0]
j = len(points) - 1
count = len(points)
for i in range(0, count):
p1_x = points[i][1]
p1_y = points[i][0]
p2_x = points[j][1]
p2_y = points[j][0]
poly_area += p1_x * p2_y
poly_area -= p1_y * p2_x
j = i
poly_area /= 2
return poly_area
def centroid(poly):
"""
get the centroid of polygon
adapted from http://paulbourke.net/geometry/polyarea/javascript.txt
Keyword arguments:
poly -- polygon geojson object
return polygon centroid
"""
f_total = 0
x_total = 0
y_total = 0
# TODO: polygon holes at coordinates[1]
points = poly['coordinates'][0]
j = len(points) - 1
count = len(points)
for i in range(0, count):
p1_x = points[i][1]
p1_y = points[i][0]
p2_x = points[j][1]
p2_y = points[j][0]
f_total = p1_x * p2_y - p2_x * p1_y
x_total += (p1_x + p2_x) * f_total
y_total += (p1_y + p2_y) * f_total
j = i
six_area = area(poly) * 6
return {'type': 'Point', 'coordinates': [y_total / six_area, x_total / six_area]}
def destination_point(point, brng, dist):
"""
Calculate a destination Point base on a base point and a distance
Keyword arguments:
pt -- polygon geojson object
brng -- an angle in degrees
dist -- distance in Kilometer between destination and base point
return destination point object
"""
dist = float(dist) / 6371 # convert dist to angular distance in radians
brng = number2radius(brng)
lon1 = number2radius(point['coordinates'][0])
lat1 = number2radius(point['coordinates'][1])
lat2 = math.asin(math.sin(lat1) * math.cos(dist) +
math.cos(lat1) * math.sin(dist) * math.cos(brng))
lon2 = lon1 + math.atan2(math.sin(brng) * math.sin(dist) *
math.cos(lat1), math.cos(dist) - math.sin(lat1) * math.sin(lat2))
lon2 = (lon2 + 3 * math.pi) % (2 * math.pi) - math.pi # normalise to -180 degree +180 degree
return {'type': 'Point', 'coordinates': [number2degree(lon2), number2degree(lat2)]}
def simplify(source, kink=20):
"""
source[] array of geojson points
kink in metres, kinks above this depth kept
kink depth is the height of the triangle abc where a-b and b-c are two consecutive line segments
"""
source_coord = map(lambda o: {"lng": o.coordinates[0], "lat": o.coordinates[1]}, source)
# count, n_stack, n_dest, start, end, i, sig;
# dev_sqr, max_dev_sqr, band_sqr;
# x12, y12, d12, x13, y13, d13, x23, y23, d23;
F = (math.pi / 180.0) * 0.5
index = [] # aray of indexes of source points to include in the reduced line
sig_start = [] # indices of start & end of working section
sig_end = []
# check for simple cases
count = len(source_coord)
if count < 3:
return source_coord # one or two points
# more complex case. initialize stack
band_sqr = kink * 360.0 / (2.0 * math.pi * 6378137.0) # Now in degrees
band_sqr *= band_sqr
n_dest = 0
sig_start[0] = 0
sig_end[0] = count - 1
n_stack = 1
# while the stack is not empty
while n_stack > 0:
# ... pop the top-most entries off the stacks
start = sig_start[n_stack - 1]
end = sig_end[n_stack - 1]
n_stack -= 1
if (end - start) > 1: #any intermediate points ?
# ... yes, so find most deviant intermediate point to either side of line joining start & end points
x12 = source[end]["lng"] - source[start]["lng"]
y12 = source[end]["lat"] - source[start]["lat"]
if math.fabs(x12) > 180.0:
x12 = 360.0 - math.fabs(x12)
x12 *= math.cos(F * (source[end]["lat"] + source[start]["lat"])) # use avg lat to reduce lng
d12 = (x12 * x12) + (y12 * y12)
i = start + 1
sig = start
max_dev_sqr = -1.0
while i < end:
x13 = source[i]["lng"] - source[start]["lng"]
y13 = source[i]["lat"] - source[start]["lat"]
if math.fabs(x13) > 180.0:
x13 = 360.0 - math.fabs(x13)
x13 *= math.cos(F * (source[i]["lat"] + source[start]["lat"]))
d13 = (x13 * x13) + (y13 * y13)
x23 = source[i]["lng"] - source[end]["lng"]
y23 = source[i]["lat"] - source[end]["lat"]
if math.fabs(x23) > 180.0:
x23 = 360.0 - math.fabs(x23)
x23 *= math.cos(F * (source[i]["lat"] + source[end]["lat"]))
d23 = (x23 * x23) + (y23 * y23)
if d13 >= (d12 + d23):
dev_sqr = d23
elif d23 >= (d12 + d13):
dev_sqr = d13
else:
dev_sqr = (x13 * y12 - y13 * x12) * (x13 * y12 - y13 * x12) / d12 # solve triangle
if dev_sqr > max_dev_sqr:
sig = i
max_dev_sqr = dev_sqr
i += 1
if max_dev_sqr < band_sqr: # is there a sig. intermediate point ?
#... no, so transfer current start point
index[n_dest] = start
n_dest += 1
else: # ... yes, so push two sub-sections on stack for further processing
n_stack += 1
sig_start[n_stack - 1] = sig
sig_end[n_stack - 1] = end
n_stack += 1
sig_start[n_stack - 1] = start
sig_end[n_stack - 1] = sig
else: # ... no intermediate points, so transfer current start point
index[n_dest] = start
n_dest += 1
# transfer last point
index[n_dest] = count - 1
n_dest += 1
# make return array
r = []
for i in range(0, n_dest):
r.append(source_coord[index[i]])
return map(lambda o: {"type": "Point","coordinates": [o.lng, o.lat]}, r)
|
brandonxiang/geojson-python-utils | geojson_utils/geojson_utils.py | point_distance_ellipsode | python | def point_distance_ellipsode(point1,point2):
a = 6378137
f = 1/298.25722
b = a - a*f
e = math.sqrt((a*a-b*b)/(a*a))
lon1 = point1['coordinates'][0]
lat1 = point1['coordinates'][1]
lon2 = point1['coordinates'][0]
lat2 = point2['coordinates'][1]
M = a*(1-e*e)*math.pow(1-math.pow(e*math.sin(number2radius(lat1)),2),-1.5)
N = a/(math.pow(1-math.pow(e*math.sin(number2radius(lat1)),2),0.5))
distance_lat = M*number2radius(lat2-lat1)
distance_lon = N*math.cos(number2radius(lat1))*(lon2-lon1)*3600*math.sin(1/3600*math.pi/180)
return math.sqrt(distance_lat*distance_lat+distance_lon*distance_lon) | calculate the distance between two points on the ellipsode based on point1
Keyword arguments:
point1 -- point one geojson object
point2 -- point two geojson object
return distance | train | https://github.com/brandonxiang/geojson-python-utils/blob/33d0dcd5f16e0567b48c0d49fd292a4f1db16b41/geojson_utils/geojson_utils.py#L238-L261 | [
"def number2radius(number):\n \"\"\"\n convert degree into radius\n\n Keyword arguments:\n number -- degree\n\n return radius\n \"\"\"\n return number * math.pi / 180\n"
] | import math
def linestrings_intersect(line1, line2):
"""
To valid whether linestrings from geojson are intersected with each other.
reference: http://www.kevlindev.com/gui/math/intersection/Intersection.js
Keyword arguments:
line1 -- first line geojson object
line2 -- second line geojson object
if(line1 intersects with other) return intersect point array else empty array
"""
intersects = []
for i in range(0, len(line1['coordinates']) - 1):
for j in range(0, len(line2['coordinates']) - 1):
a1_x = line1['coordinates'][i][1]
a1_y = line1['coordinates'][i][0]
a2_x = line1['coordinates'][i + 1][1]
a2_y = line1['coordinates'][i + 1][0]
b1_x = line2['coordinates'][j][1]
b1_y = line2['coordinates'][j][0]
b2_x = line2['coordinates'][j + 1][1]
b2_y = line2['coordinates'][j + 1][0]
ua_t = (b2_x - b1_x) * (a1_y - b1_y) - \
(b2_y - b1_y) * (a1_x - b1_x)
ub_t = (a2_x - a1_x) * (a1_y - b1_y) - \
(a2_y - a1_y) * (a1_x - b1_x)
u_b = (b2_y - b1_y) * (a2_x - a1_x) - (b2_x - b1_x) * (a2_y - a1_y)
if not u_b == 0:
u_a = ua_t / u_b
u_b = ub_t / u_b
if 0 <= u_a and u_a <= 1 and 0 <= u_b and u_b <= 1:
intersects.append({'type': 'Point', 'coordinates': [
a1_x + u_a * (a2_x - a1_x), a1_y + u_a * (a2_y - a1_y)]})
# if len(intersects) == 0:
# intersects = False
return intersects
def _bbox_around_polycoords(coords):
"""
bounding box
"""
x_all = []
y_all = []
for first in coords[0]:
x_all.append(first[1])
y_all.append(first[0])
return [min(x_all), min(y_all), max(x_all), max(y_all)]
def _point_in_bbox(point, bounds):
"""
valid whether the point is inside the bounding box
"""
return not(point['coordinates'][1] < bounds[0] or point['coordinates'][1] > bounds[2]
or point['coordinates'][0] < bounds[1] or point['coordinates'][0] > bounds[3])
def _pnpoly(x, y, coords):
"""
the algorithm to judge whether the point is located in polygon
reference: https://www.ecse.rpi.edu/~wrf/Research/Short_Notes/pnpoly.html#Explanation
"""
vert = [[0, 0]]
for coord in coords:
for node in coord:
vert.append(node)
vert.append(coord[0])
vert.append([0, 0])
inside = False
i = 0
j = len(vert) - 1
while i < len(vert):
if ((vert[i][0] > y) != (vert[j][0] > y)) and (x < (vert[j][1] - vert[i][1])
* (y - vert[i][0]) / (vert[j][0] - vert[i][0]) + vert[i][1]):
inside = not inside
j = i
i += 1
return inside
def _point_in_polygon(point, coords):
inside_box = False
for coord in coords:
if inside_box:
break
if _point_in_bbox(point, _bbox_around_polycoords(coord)):
inside_box = True
if not inside_box:
return False
inside_poly = False
for coord in coords:
if inside_poly:
break
if _pnpoly(point['coordinates'][1], point['coordinates'][0], coord):
inside_poly = True
return inside_poly
def point_in_polygon(point, poly):
    """
    valid whether the point is located in a polygon

    Keyword arguments:
    point -- point geojson object
    poly -- polygon geojson object

    if(point inside poly) return true else false
    """
    if poly['type'] == 'Polygon':
        rings = [poly['coordinates']]
    else:
        rings = poly['coordinates']
    return _point_in_polygon(point, rings)
def point_in_multipolygon(point, multipoly):
    """
    valid whether the point is located in a mulitpolygon (donut polygon is not supported)

    Keyword arguments:
    point -- point geojson object
    multipoly -- multipolygon geojson object

    if(point inside multipoly) return true else false
    """
    if multipoly['type'] == "MultiPolygon":
        polygons = [multipoly['coordinates']]
    else:
        polygons = multipoly['coordinates']
    return any(_point_in_polygon(point, polygon) for polygon in polygons)
def number2radius(number):
    """
    convert degree into radius

    Keyword arguments:
    number -- angle in degrees

    return the same angle in radians
    """
    # IEEE multiplication is commutative, so this matches number * pi / 180 exactly
    return math.pi * number / 180
def number2degree(number):
    """
    convert radius into degree

    Keyword arguments:
    number -- angle in radians

    return the same angle in degrees
    """
    # IEEE multiplication is commutative, so this matches number * 180 / pi exactly
    return 180 * number / math.pi
def draw_circle(radius_in_meters, center_point, steps=15):
    """
    get a circle shape polygon based on centerPoint and radius

    Keyword arguments:
    radius_in_meters -- circle radius in meters
    center_point -- center point geojson object
    steps -- number of polygon vertices (a minimum of 15 is enforced)

    return polygon geojson object approximating the circle
    """
    vertex_count = steps if steps > 15 else 15
    center_lat = center_point['coordinates'][1]
    center_lng = center_point['coordinates'][0]
    # angular distance covered by the radius (mean earth radius ~6371 km)
    dist = (radius_in_meters / 1000) / 6371
    lat_rad = center_lat * math.pi / 180
    lng_rad = center_lng * math.pi / 180
    ring = []
    for idx in range(0, vertex_count):
        brng = 2 * math.pi * idx / vertex_count
        pt_lat = math.asin(math.sin(lat_rad) * math.cos(dist) +
                           math.cos(lat_rad) * math.sin(dist) * math.cos(brng))
        pt_lng = lng_rad + math.atan2(
            math.sin(brng) * math.sin(dist) * math.cos(lat_rad),
            math.cos(dist) - math.sin(lat_rad) * math.sin(pt_lat))
        ring.append([pt_lng * 180 / math.pi, pt_lat * 180 / math.pi])
    return {"type": "Polygon", "coordinates": [ring]}
def rectangle_centroid(rectangle):
    """
    get the centroid of the rectangle

    Keyword arguments:
    rectangle -- polygon geojson object

    return centroid
    """
    ring = rectangle['coordinates'][0]
    xmin, ymin = ring[0][0], ring[0][1]
    xmax, ymax = ring[2][0], ring[2][1]
    # midpoint of the diagonal between the first and third corners
    return {'type': 'Point',
            'coordinates': [xmin + (xmax - xmin) / 2, ymin + (ymax - ymin) / 2]}
def point_distance(point1, point2):
    """
    calculate the distance between two points on the sphere like google map
    reference http://www.movable-type.co.uk/scripts/latlong.html

    Keyword arguments:
    point1 -- point one geojson object
    point2 -- point two geojson object

    return distance in meters
    """
    lng_a, lat_a = point1['coordinates'][0], point1['coordinates'][1]
    lng_b, lat_b = point2['coordinates'][0], point2['coordinates'][1]
    d_lat = (lat_b - lat_a) * math.pi / 180
    d_lng = (lng_b - lng_a) * math.pi / 180
    # haversine formula
    hav = math.pow(math.sin(d_lat / 2), 2) + math.cos(lat_a * math.pi / 180) * \
        math.cos(lat_b * math.pi / 180) * math.pow(math.sin(d_lng / 2), 2)
    arc = 2 * math.atan2(math.sqrt(hav), math.sqrt(1 - hav))
    return (6371 * arc) * 1000
def point_distance_ellipsode(point1, point2):
    """
    calculate the distance between two points on the ellipsoid based on point1

    Keyword arguments:
    point1 -- point one geojson object
    point2 -- point two geojson object

    return distance in meters
    """
    # ellipsoid constants: semi-major axis, flattening, first eccentricity
    a = 6378137
    f = 1 / 298.25722
    b = a - a * f
    e = math.sqrt((a * a - b * b) / (a * a))
    lon1 = point1['coordinates'][0]
    lat1 = point1['coordinates'][1]
    # bug fix: lon2 was previously read from point1, which made the
    # longitude term always zero
    lon2 = point2['coordinates'][0]
    lat2 = point2['coordinates'][1]
    e_sin = e * math.sin(lat1 * math.pi / 180)
    # meridian radius of curvature at point1
    M = a * (1 - e * e) * math.pow(1 - e_sin * e_sin, -1.5)
    # prime-vertical radius of curvature at point1
    N = a / math.sqrt(1 - e_sin * e_sin)
    distance_lat = M * (lat2 - lat1) * math.pi / 180
    distance_lon = N * math.cos(lat1 * math.pi / 180) * (lon2 - lon1) * 3600 * \
        math.sin(1 / 3600 * math.pi / 180)
    return math.hypot(distance_lat, distance_lon)
def geometry_within_radius(geometry, center, radius):
    """
    To valid whether point or linestring or polygon is inside a radius around a center

    Keyword arguments:
    geometry -- point/linstring/polygon geojson object
    center -- point geojson object
    radius -- radius in meters

    if(geometry inside radius) return true else false
    (returns None for unsupported geometry types)
    """
    gtype = geometry['type']
    if gtype == 'Point':
        return point_distance(geometry, center) <= radius
    if gtype in ('LineString', 'Polygon'):
        # for a polygon, checking the exterior ring is sufficient
        if gtype == 'Polygon':
            coordinates = geometry['coordinates'][0]
        else:
            coordinates = geometry['coordinates']
        probe = {}
        for coordinate in coordinates:
            probe['coordinates'] = coordinate
            if point_distance(probe, center) > radius:
                return False
        return True
def area(poly):
    """
    calculate the (signed) area of polygon via the shoelace formula

    Keyword arguments:
    poly -- polygon geojson object

    return polygon area
    """
    # TODO: polygon holes at coordinates[1]
    ring = poly['coordinates'][0]
    total = 0
    prev = len(ring) - 1
    for cur in range(len(ring)):
        cur_x, cur_y = ring[cur][1], ring[cur][0]
        prev_x, prev_y = ring[prev][1], ring[prev][0]
        total += cur_x * prev_y
        total -= cur_y * prev_x
        prev = cur
    return total / 2
def centroid(poly):
    """
    get the centroid of polygon
    adapted from http://paulbourke.net/geometry/polyarea/javascript.txt

    Keyword arguments:
    poly -- polygon geojson object

    return polygon centroid
    """
    # TODO: polygon holes at coordinates[1]
    ring = poly['coordinates'][0]
    x_sum = 0
    y_sum = 0
    prev = len(ring) - 1
    for cur in range(len(ring)):
        cur_x, cur_y = ring[cur][1], ring[cur][0]
        prev_x, prev_y = ring[prev][1], ring[prev][0]
        cross = cur_x * prev_y - prev_x * cur_y
        x_sum += (cur_x + prev_x) * cross
        y_sum += (cur_y + prev_y) * cross
        prev = cur
    six_area = area(poly) * 6
    return {'type': 'Point', 'coordinates': [y_sum / six_area, x_sum / six_area]}
def destination_point(point, brng, dist):
    """
    Calculate a destination Point base on a base point and a distance

    Keyword arguments:
    point -- point geojson object
    brng -- an angle in degrees
    dist -- distance in Kilometer between destination and base point

    return destination point object
    """
    ang_dist = float(dist) / 6371  # angular distance in radians
    bearing = brng * math.pi / 180
    lng1 = point['coordinates'][0] * math.pi / 180
    lat1 = point['coordinates'][1] * math.pi / 180
    lat2 = math.asin(math.sin(lat1) * math.cos(ang_dist) +
                     math.cos(lat1) * math.sin(ang_dist) * math.cos(bearing))
    lng2 = lng1 + math.atan2(math.sin(bearing) * math.sin(ang_dist) * math.cos(lat1),
                             math.cos(ang_dist) - math.sin(lat1) * math.sin(lat2))
    # normalise to the -180 .. +180 degree range
    lng2 = (lng2 + 3 * math.pi) % (2 * math.pi) - math.pi
    return {'type': 'Point',
            'coordinates': [lng2 * 180 / math.pi, lat2 * 180 / math.pi]}
def simplify(source, kink=20):
    """
    Reduce a polyline with a Douglas-Peucker style band test.

    Keyword arguments:
    source -- array of geojson point objects describing the input line
    kink -- tolerance in metres; kinks deeper than this are kept
            (kink depth is the height of the triangle abc where a-b and
            b-c are two consecutive line segments)

    return array of geojson point objects for the simplified line
    """
    # Bug fixes vs the previous revision: the map() iterator was consumed by
    # len() (fails on Python 3), points were read as attributes
    # (o.coordinates / o.lng) instead of dict keys, the working arrays
    # (index / sig_start / sig_end) were indexed while still empty, and the
    # main loop read from `source` instead of `source_coord`.
    source_coord = [{"lng": pt["coordinates"][0], "lat": pt["coordinates"][1]}
                    for pt in source]
    count = len(source_coord)
    if count < 3:
        # one or two points: nothing to simplify
        return [{"type": "Point", "coordinates": [pt["lng"], pt["lat"]]}
                for pt in source_coord]
    F = (math.pi / 180.0) * 0.5  # deg->rad conversion, halved for averaging
    # tolerance converted to degrees (earth circumference 2*pi*6378137 m), squared
    band_sqr = kink * 360.0 / (2.0 * math.pi * 6378137.0)
    band_sqr *= band_sqr
    index = []  # indices of source points kept in the reduced line
    stack = [(0, count - 1)]  # sections still to be examined
    while stack:
        start, end = stack.pop()
        if end - start > 1:  # any intermediate points?
            # find the most deviant intermediate point between start and end
            x12 = source_coord[end]["lng"] - source_coord[start]["lng"]
            y12 = source_coord[end]["lat"] - source_coord[start]["lat"]
            if math.fabs(x12) > 180.0:
                x12 = 360.0 - math.fabs(x12)
            # use the average latitude to shrink the longitude difference
            x12 *= math.cos(F * (source_coord[end]["lat"] + source_coord[start]["lat"]))
            d12 = (x12 * x12) + (y12 * y12)
            sig = start
            max_dev_sqr = -1.0
            for i in range(start + 1, end):
                x13 = source_coord[i]["lng"] - source_coord[start]["lng"]
                y13 = source_coord[i]["lat"] - source_coord[start]["lat"]
                if math.fabs(x13) > 180.0:
                    x13 = 360.0 - math.fabs(x13)
                x13 *= math.cos(F * (source_coord[i]["lat"] + source_coord[start]["lat"]))
                d13 = (x13 * x13) + (y13 * y13)
                x23 = source_coord[i]["lng"] - source_coord[end]["lng"]
                y23 = source_coord[i]["lat"] - source_coord[end]["lat"]
                if math.fabs(x23) > 180.0:
                    x23 = 360.0 - math.fabs(x23)
                x23 *= math.cos(F * (source_coord[i]["lat"] + source_coord[end]["lat"]))
                d23 = (x23 * x23) + (y23 * y23)
                if d13 >= (d12 + d23):
                    dev_sqr = d23
                elif d23 >= (d12 + d13):
                    dev_sqr = d13
                else:
                    # squared perpendicular distance from point i to the chord
                    dev_sqr = (x13 * y12 - y13 * x12) * (x13 * y12 - y13 * x12) / d12
                if dev_sqr > max_dev_sqr:
                    sig = i
                    max_dev_sqr = dev_sqr
            if max_dev_sqr < band_sqr:
                # no significant intermediate point: keep only the start
                index.append(start)
            else:
                # split at the most deviant point; (start, sig) is processed
                # first so kept indices come out in increasing order
                stack.append((sig, end))
                stack.append((start, sig))
        else:
            # no intermediate points: keep the start
            index.append(start)
    index.append(count - 1)  # always keep the last point
    return [{"type": "Point",
             "coordinates": [source_coord[i]["lng"], source_coord[i]["lat"]]}
            for i in index]
|
brandonxiang/geojson-python-utils | geojson_utils/geojson_utils.py | geometry_within_radius | python | def geometry_within_radius(geometry, center, radius):
if geometry['type'] == 'Point':
return point_distance(geometry, center) <= radius
elif geometry['type'] == 'LineString' or geometry['type'] == 'Polygon':
point = {}
# it's enough to check the exterior ring of the Polygon
coordinates = geometry['coordinates'][0] if geometry['type'] == 'Polygon' else geometry['coordinates']
for coordinate in coordinates:
point['coordinates'] = coordinate
if point_distance(point, center) > radius:
return False
return True | To valid whether point or linestring or polygon is inside a radius around a center
Keyword arguments:
geometry -- point/linstring/polygon geojson object
center -- point geojson object
radius -- radius
if(geometry inside radius) return true else false | train | https://github.com/brandonxiang/geojson-python-utils/blob/33d0dcd5f16e0567b48c0d49fd292a4f1db16b41/geojson_utils/geojson_utils.py#L264-L286 | [
"def point_distance(point1, point2):\n \"\"\"\n calculate the distance between two points on the sphere like google map\n reference http://www.movable-type.co.uk/scripts/latlong.html\n\n Keyword arguments:\n point1 -- point one geojson object\n point2 -- point two geojson object\n\n return di... | import math
def linestrings_intersect(line1, line2):
    """
    To valid whether linestrings from geojson are intersected with each other.
    reference: http://www.kevlindev.com/gui/math/intersection/Intersection.js

    Keyword arguments:
    line1 -- first line geojson object
    line2 -- second line geojson object

    if(line1 intersects with other) return intersect point array else empty array
    """
    intersects = []
    coords1 = line1['coordinates']
    coords2 = line2['coordinates']
    for seg1 in range(len(coords1) - 1):
        for seg2 in range(len(coords2) - 1):
            a1_x, a1_y = coords1[seg1][1], coords1[seg1][0]
            a2_x, a2_y = coords1[seg1 + 1][1], coords1[seg1 + 1][0]
            b1_x, b1_y = coords2[seg2][1], coords2[seg2][0]
            b2_x, b2_y = coords2[seg2 + 1][1], coords2[seg2 + 1][0]
            denom = (b2_y - b1_y) * (a2_x - a1_x) - (b2_x - b1_x) * (a2_y - a1_y)
            if denom == 0:
                continue  # parallel or collinear segments never cross
            u_a = ((b2_x - b1_x) * (a1_y - b1_y) -
                   (b2_y - b1_y) * (a1_x - b1_x)) / denom
            u_b = ((a2_x - a1_x) * (a1_y - b1_y) -
                   (a2_y - a1_y) * (a1_x - b1_x)) / denom
            # both parameters inside [0, 1] => the segments truly intersect
            if 0 <= u_a <= 1 and 0 <= u_b <= 1:
                intersects.append({'type': 'Point', 'coordinates': [
                    a1_x + u_a * (a2_x - a1_x), a1_y + u_a * (a2_y - a1_y)]})
    return intersects
def _bbox_around_polycoords(coords):
"""
bounding box
"""
x_all = []
y_all = []
for first in coords[0]:
x_all.append(first[1])
y_all.append(first[0])
return [min(x_all), min(y_all), max(x_all), max(y_all)]
def _point_in_bbox(point, bounds):
"""
valid whether the point is inside the bounding box
"""
return not(point['coordinates'][1] < bounds[0] or point['coordinates'][1] > bounds[2]
or point['coordinates'][0] < bounds[1] or point['coordinates'][0] > bounds[3])
def _pnpoly(x, y, coords):
"""
the algorithm to judge whether the point is located in polygon
reference: https://www.ecse.rpi.edu/~wrf/Research/Short_Notes/pnpoly.html#Explanation
"""
vert = [[0, 0]]
for coord in coords:
for node in coord:
vert.append(node)
vert.append(coord[0])
vert.append([0, 0])
inside = False
i = 0
j = len(vert) - 1
while i < len(vert):
if ((vert[i][0] > y) != (vert[j][0] > y)) and (x < (vert[j][1] - vert[i][1])
* (y - vert[i][0]) / (vert[j][0] - vert[i][0]) + vert[i][1]):
inside = not inside
j = i
i += 1
return inside
def _point_in_polygon(point, coords):
inside_box = False
for coord in coords:
if inside_box:
break
if _point_in_bbox(point, _bbox_around_polycoords(coord)):
inside_box = True
if not inside_box:
return False
inside_poly = False
for coord in coords:
if inside_poly:
break
if _pnpoly(point['coordinates'][1], point['coordinates'][0], coord):
inside_poly = True
return inside_poly
def point_in_polygon(point, poly):
"""
valid whether the point is located in a polygon
Keyword arguments:
point -- point geojson object
poly -- polygon geojson object
if(point inside poly) return true else false
"""
coords = [poly['coordinates']] if poly[
'type'] == 'Polygon' else poly['coordinates']
return _point_in_polygon(point, coords)
def point_in_multipolygon(point, multipoly):
"""
valid whether the point is located in a mulitpolygon (donut polygon is not supported)
Keyword arguments:
point -- point geojson object
multipoly -- multipolygon geojson object
if(point inside multipoly) return true else false
"""
coords_array = [multipoly['coordinates']] if multipoly[
'type'] == "MultiPolygon" else multipoly['coordinates']
for coords in coords_array:
if _point_in_polygon(point, coords):
return True
return False
def number2radius(number):
"""
convert degree into radius
Keyword arguments:
number -- degree
return radius
"""
return number * math.pi / 180
def number2degree(number):
"""
convert radius into degree
Keyword arguments:
number -- radius
return degree
"""
return number * 180 / math.pi
def draw_circle(radius_in_meters, center_point, steps=15):
"""
get a circle shape polygon based on centerPoint and radius
Keyword arguments:
point1 -- point one geojson object
point2 -- point two geojson object
if(point inside multipoly) return true else false
"""
steps = steps if steps > 15 else 15
center = [center_point['coordinates'][1], center_point['coordinates'][0]]
dist = (radius_in_meters / 1000) / 6371
# convert meters to radiant
rad_center = [number2radius(center[0]), number2radius(center[1])]
# 15 sided circle
poly = []
for step in range(0, steps):
brng = 2 * math.pi * step / steps
lat = math.asin(math.sin(rad_center[0]) * math.cos(dist) +
math.cos(rad_center[0]) * math.sin(dist) * math.cos(brng))
lng = rad_center[1] + math.atan2(math.sin(brng) * math.sin(dist)
* math.cos(rad_center[0]), math.cos(dist) - math.sin(rad_center[0]) * math.sin(lat))
poly.append([number2degree(lng), number2degree(lat)])
return {"type": "Polygon", "coordinates": [poly]}
def rectangle_centroid(rectangle):
"""
get the centroid of the rectangle
Keyword arguments:
rectangle -- polygon geojson object
return centroid
"""
bbox = rectangle['coordinates'][0]
xmin = bbox[0][0]
ymin = bbox[0][1]
xmax = bbox[2][0]
ymax = bbox[2][1]
xwidth = xmax - xmin
ywidth = ymax - ymin
return {'type': 'Point', 'coordinates': [xmin + xwidth / 2, ymin + ywidth / 2]}
def point_distance(point1, point2):
"""
calculate the distance between two points on the sphere like google map
reference http://www.movable-type.co.uk/scripts/latlong.html
Keyword arguments:
point1 -- point one geojson object
point2 -- point two geojson object
return distance
"""
lon1 = point1['coordinates'][0]
lat1 = point1['coordinates'][1]
lon2 = point2['coordinates'][0]
lat2 = point2['coordinates'][1]
deg_lat = number2radius(lat2 - lat1)
deg_lon = number2radius(lon2 - lon1)
a = math.pow(math.sin(deg_lat / 2), 2) + math.cos(number2radius(lat1)) * \
math.cos(number2radius(lat2)) * math.pow(math.sin(deg_lon / 2), 2)
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
return (6371 * c) * 1000
def point_distance_ellipsode(point1, point2):
    """
    calculate the distance between two points on the ellipsoid based on point1

    Keyword arguments:
    point1 -- point one geojson object
    point2 -- point two geojson object

    return distance in meters
    """
    # ellipsoid constants: semi-major axis, flattening, first eccentricity
    a = 6378137
    f = 1 / 298.25722
    b = a - a * f
    e = math.sqrt((a * a - b * b) / (a * a))
    lon1 = point1['coordinates'][0]
    lat1 = point1['coordinates'][1]
    # bug fix: lon2 was previously read from point1, which made the
    # longitude term always zero
    lon2 = point2['coordinates'][0]
    lat2 = point2['coordinates'][1]
    e_sin = e * math.sin(lat1 * math.pi / 180)
    # meridian radius of curvature at point1
    M = a * (1 - e * e) * math.pow(1 - e_sin * e_sin, -1.5)
    # prime-vertical radius of curvature at point1
    N = a / math.sqrt(1 - e_sin * e_sin)
    distance_lat = M * (lat2 - lat1) * math.pi / 180
    distance_lon = N * math.cos(lat1 * math.pi / 180) * (lon2 - lon1) * 3600 * \
        math.sin(1 / 3600 * math.pi / 180)
    return math.hypot(distance_lat, distance_lon)
def area(poly):
"""
calculate the area of polygon
Keyword arguments:
poly -- polygon geojson object
return polygon area
"""
poly_area = 0
# TODO: polygon holes at coordinates[1]
points = poly['coordinates'][0]
j = len(points) - 1
count = len(points)
for i in range(0, count):
p1_x = points[i][1]
p1_y = points[i][0]
p2_x = points[j][1]
p2_y = points[j][0]
poly_area += p1_x * p2_y
poly_area -= p1_y * p2_x
j = i
poly_area /= 2
return poly_area
def centroid(poly):
"""
get the centroid of polygon
adapted from http://paulbourke.net/geometry/polyarea/javascript.txt
Keyword arguments:
poly -- polygon geojson object
return polygon centroid
"""
f_total = 0
x_total = 0
y_total = 0
# TODO: polygon holes at coordinates[1]
points = poly['coordinates'][0]
j = len(points) - 1
count = len(points)
for i in range(0, count):
p1_x = points[i][1]
p1_y = points[i][0]
p2_x = points[j][1]
p2_y = points[j][0]
f_total = p1_x * p2_y - p2_x * p1_y
x_total += (p1_x + p2_x) * f_total
y_total += (p1_y + p2_y) * f_total
j = i
six_area = area(poly) * 6
return {'type': 'Point', 'coordinates': [y_total / six_area, x_total / six_area]}
def destination_point(point, brng, dist):
"""
Calculate a destination Point base on a base point and a distance
Keyword arguments:
pt -- polygon geojson object
brng -- an angle in degrees
dist -- distance in Kilometer between destination and base point
return destination point object
"""
dist = float(dist) / 6371 # convert dist to angular distance in radians
brng = number2radius(brng)
lon1 = number2radius(point['coordinates'][0])
lat1 = number2radius(point['coordinates'][1])
lat2 = math.asin(math.sin(lat1) * math.cos(dist) +
math.cos(lat1) * math.sin(dist) * math.cos(brng))
lon2 = lon1 + math.atan2(math.sin(brng) * math.sin(dist) *
math.cos(lat1), math.cos(dist) - math.sin(lat1) * math.sin(lat2))
lon2 = (lon2 + 3 * math.pi) % (2 * math.pi) - math.pi # normalise to -180 degree +180 degree
return {'type': 'Point', 'coordinates': [number2degree(lon2), number2degree(lat2)]}
def simplify(source, kink=20):
    """
    Reduce a polyline with a Douglas-Peucker style band test.

    Keyword arguments:
    source -- array of geojson point objects describing the input line
    kink -- tolerance in metres; kinks deeper than this are kept
            (kink depth is the height of the triangle abc where a-b and
            b-c are two consecutive line segments)

    return array of geojson point objects for the simplified line
    """
    # Bug fixes vs the previous revision: the map() iterator was consumed by
    # len() (fails on Python 3), points were read as attributes
    # (o.coordinates / o.lng) instead of dict keys, the working arrays
    # (index / sig_start / sig_end) were indexed while still empty, and the
    # main loop read from `source` instead of `source_coord`.
    source_coord = [{"lng": pt["coordinates"][0], "lat": pt["coordinates"][1]}
                    for pt in source]
    count = len(source_coord)
    if count < 3:
        # one or two points: nothing to simplify
        return [{"type": "Point", "coordinates": [pt["lng"], pt["lat"]]}
                for pt in source_coord]
    F = (math.pi / 180.0) * 0.5  # deg->rad conversion, halved for averaging
    # tolerance converted to degrees (earth circumference 2*pi*6378137 m), squared
    band_sqr = kink * 360.0 / (2.0 * math.pi * 6378137.0)
    band_sqr *= band_sqr
    index = []  # indices of source points kept in the reduced line
    stack = [(0, count - 1)]  # sections still to be examined
    while stack:
        start, end = stack.pop()
        if end - start > 1:  # any intermediate points?
            # find the most deviant intermediate point between start and end
            x12 = source_coord[end]["lng"] - source_coord[start]["lng"]
            y12 = source_coord[end]["lat"] - source_coord[start]["lat"]
            if math.fabs(x12) > 180.0:
                x12 = 360.0 - math.fabs(x12)
            # use the average latitude to shrink the longitude difference
            x12 *= math.cos(F * (source_coord[end]["lat"] + source_coord[start]["lat"]))
            d12 = (x12 * x12) + (y12 * y12)
            sig = start
            max_dev_sqr = -1.0
            for i in range(start + 1, end):
                x13 = source_coord[i]["lng"] - source_coord[start]["lng"]
                y13 = source_coord[i]["lat"] - source_coord[start]["lat"]
                if math.fabs(x13) > 180.0:
                    x13 = 360.0 - math.fabs(x13)
                x13 *= math.cos(F * (source_coord[i]["lat"] + source_coord[start]["lat"]))
                d13 = (x13 * x13) + (y13 * y13)
                x23 = source_coord[i]["lng"] - source_coord[end]["lng"]
                y23 = source_coord[i]["lat"] - source_coord[end]["lat"]
                if math.fabs(x23) > 180.0:
                    x23 = 360.0 - math.fabs(x23)
                x23 *= math.cos(F * (source_coord[i]["lat"] + source_coord[end]["lat"]))
                d23 = (x23 * x23) + (y23 * y23)
                if d13 >= (d12 + d23):
                    dev_sqr = d23
                elif d23 >= (d12 + d13):
                    dev_sqr = d13
                else:
                    # squared perpendicular distance from point i to the chord
                    dev_sqr = (x13 * y12 - y13 * x12) * (x13 * y12 - y13 * x12) / d12
                if dev_sqr > max_dev_sqr:
                    sig = i
                    max_dev_sqr = dev_sqr
            if max_dev_sqr < band_sqr:
                # no significant intermediate point: keep only the start
                index.append(start)
            else:
                # split at the most deviant point; (start, sig) is processed
                # first so kept indices come out in increasing order
                stack.append((sig, end))
                stack.append((start, sig))
        else:
            # no intermediate points: keep the start
            index.append(start)
    index.append(count - 1)  # always keep the last point
    return [{"type": "Point",
             "coordinates": [source_coord[i]["lng"], source_coord[i]["lat"]]}
            for i in index]
|
brandonxiang/geojson-python-utils | geojson_utils/geojson_utils.py | area | python | def area(poly):
poly_area = 0
# TODO: polygon holes at coordinates[1]
points = poly['coordinates'][0]
j = len(points) - 1
count = len(points)
for i in range(0, count):
p1_x = points[i][1]
p1_y = points[i][0]
p2_x = points[j][1]
p2_y = points[j][0]
poly_area += p1_x * p2_y
poly_area -= p1_y * p2_x
j = i
poly_area /= 2
return poly_area | calculate the area of polygon
Keyword arguments:
poly -- polygon geojson object
return polygon area | train | https://github.com/brandonxiang/geojson-python-utils/blob/33d0dcd5f16e0567b48c0d49fd292a4f1db16b41/geojson_utils/geojson_utils.py#L289-L315 | null | import math
def linestrings_intersect(line1, line2):
"""
To valid whether linestrings from geojson are intersected with each other.
reference: http://www.kevlindev.com/gui/math/intersection/Intersection.js
Keyword arguments:
line1 -- first line geojson object
line2 -- second line geojson object
if(line1 intersects with other) return intersect point array else empty array
"""
intersects = []
for i in range(0, len(line1['coordinates']) - 1):
for j in range(0, len(line2['coordinates']) - 1):
a1_x = line1['coordinates'][i][1]
a1_y = line1['coordinates'][i][0]
a2_x = line1['coordinates'][i + 1][1]
a2_y = line1['coordinates'][i + 1][0]
b1_x = line2['coordinates'][j][1]
b1_y = line2['coordinates'][j][0]
b2_x = line2['coordinates'][j + 1][1]
b2_y = line2['coordinates'][j + 1][0]
ua_t = (b2_x - b1_x) * (a1_y - b1_y) - \
(b2_y - b1_y) * (a1_x - b1_x)
ub_t = (a2_x - a1_x) * (a1_y - b1_y) - \
(a2_y - a1_y) * (a1_x - b1_x)
u_b = (b2_y - b1_y) * (a2_x - a1_x) - (b2_x - b1_x) * (a2_y - a1_y)
if not u_b == 0:
u_a = ua_t / u_b
u_b = ub_t / u_b
if 0 <= u_a and u_a <= 1 and 0 <= u_b and u_b <= 1:
intersects.append({'type': 'Point', 'coordinates': [
a1_x + u_a * (a2_x - a1_x), a1_y + u_a * (a2_y - a1_y)]})
# if len(intersects) == 0:
# intersects = False
return intersects
def _bbox_around_polycoords(coords):
"""
bounding box
"""
x_all = []
y_all = []
for first in coords[0]:
x_all.append(first[1])
y_all.append(first[0])
return [min(x_all), min(y_all), max(x_all), max(y_all)]
def _point_in_bbox(point, bounds):
"""
valid whether the point is inside the bounding box
"""
return not(point['coordinates'][1] < bounds[0] or point['coordinates'][1] > bounds[2]
or point['coordinates'][0] < bounds[1] or point['coordinates'][0] > bounds[3])
def _pnpoly(x, y, coords):
"""
the algorithm to judge whether the point is located in polygon
reference: https://www.ecse.rpi.edu/~wrf/Research/Short_Notes/pnpoly.html#Explanation
"""
vert = [[0, 0]]
for coord in coords:
for node in coord:
vert.append(node)
vert.append(coord[0])
vert.append([0, 0])
inside = False
i = 0
j = len(vert) - 1
while i < len(vert):
if ((vert[i][0] > y) != (vert[j][0] > y)) and (x < (vert[j][1] - vert[i][1])
* (y - vert[i][0]) / (vert[j][0] - vert[i][0]) + vert[i][1]):
inside = not inside
j = i
i += 1
return inside
def _point_in_polygon(point, coords):
inside_box = False
for coord in coords:
if inside_box:
break
if _point_in_bbox(point, _bbox_around_polycoords(coord)):
inside_box = True
if not inside_box:
return False
inside_poly = False
for coord in coords:
if inside_poly:
break
if _pnpoly(point['coordinates'][1], point['coordinates'][0], coord):
inside_poly = True
return inside_poly
def point_in_polygon(point, poly):
"""
valid whether the point is located in a polygon
Keyword arguments:
point -- point geojson object
poly -- polygon geojson object
if(point inside poly) return true else false
"""
coords = [poly['coordinates']] if poly[
'type'] == 'Polygon' else poly['coordinates']
return _point_in_polygon(point, coords)
def point_in_multipolygon(point, multipoly):
"""
valid whether the point is located in a mulitpolygon (donut polygon is not supported)
Keyword arguments:
point -- point geojson object
multipoly -- multipolygon geojson object
if(point inside multipoly) return true else false
"""
coords_array = [multipoly['coordinates']] if multipoly[
'type'] == "MultiPolygon" else multipoly['coordinates']
for coords in coords_array:
if _point_in_polygon(point, coords):
return True
return False
def number2radius(number):
"""
convert degree into radius
Keyword arguments:
number -- degree
return radius
"""
return number * math.pi / 180
def number2degree(number):
"""
convert radius into degree
Keyword arguments:
number -- radius
return degree
"""
return number * 180 / math.pi
def draw_circle(radius_in_meters, center_point, steps=15):
"""
get a circle shape polygon based on centerPoint and radius
Keyword arguments:
point1 -- point one geojson object
point2 -- point two geojson object
if(point inside multipoly) return true else false
"""
steps = steps if steps > 15 else 15
center = [center_point['coordinates'][1], center_point['coordinates'][0]]
dist = (radius_in_meters / 1000) / 6371
# convert meters to radiant
rad_center = [number2radius(center[0]), number2radius(center[1])]
# 15 sided circle
poly = []
for step in range(0, steps):
brng = 2 * math.pi * step / steps
lat = math.asin(math.sin(rad_center[0]) * math.cos(dist) +
math.cos(rad_center[0]) * math.sin(dist) * math.cos(brng))
lng = rad_center[1] + math.atan2(math.sin(brng) * math.sin(dist)
* math.cos(rad_center[0]), math.cos(dist) - math.sin(rad_center[0]) * math.sin(lat))
poly.append([number2degree(lng), number2degree(lat)])
return {"type": "Polygon", "coordinates": [poly]}
def rectangle_centroid(rectangle):
"""
get the centroid of the rectangle
Keyword arguments:
rectangle -- polygon geojson object
return centroid
"""
bbox = rectangle['coordinates'][0]
xmin = bbox[0][0]
ymin = bbox[0][1]
xmax = bbox[2][0]
ymax = bbox[2][1]
xwidth = xmax - xmin
ywidth = ymax - ymin
return {'type': 'Point', 'coordinates': [xmin + xwidth / 2, ymin + ywidth / 2]}
def point_distance(point1, point2):
"""
calculate the distance between two points on the sphere like google map
reference http://www.movable-type.co.uk/scripts/latlong.html
Keyword arguments:
point1 -- point one geojson object
point2 -- point two geojson object
return distance
"""
lon1 = point1['coordinates'][0]
lat1 = point1['coordinates'][1]
lon2 = point2['coordinates'][0]
lat2 = point2['coordinates'][1]
deg_lat = number2radius(lat2 - lat1)
deg_lon = number2radius(lon2 - lon1)
a = math.pow(math.sin(deg_lat / 2), 2) + math.cos(number2radius(lat1)) * \
math.cos(number2radius(lat2)) * math.pow(math.sin(deg_lon / 2), 2)
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
return (6371 * c) * 1000
def point_distance_ellipsode(point1,point2):
"""
calculate the distance between two points on the ellipsode based on point1
Keyword arguments:
point1 -- point one geojson object
point2 -- point two geojson object
return distance
"""
a = 6378137
f = 1/298.25722
b = a - a*f
e = math.sqrt((a*a-b*b)/(a*a))
lon1 = point1['coordinates'][0]
lat1 = point1['coordinates'][1]
lon2 = point1['coordinates'][0]
lat2 = point2['coordinates'][1]
M = a*(1-e*e)*math.pow(1-math.pow(e*math.sin(number2radius(lat1)),2),-1.5)
N = a/(math.pow(1-math.pow(e*math.sin(number2radius(lat1)),2),0.5))
distance_lat = M*number2radius(lat2-lat1)
distance_lon = N*math.cos(number2radius(lat1))*(lon2-lon1)*3600*math.sin(1/3600*math.pi/180)
return math.sqrt(distance_lat*distance_lat+distance_lon*distance_lon)
def geometry_within_radius(geometry, center, radius):
"""
To valid whether point or linestring or polygon is inside a radius around a center
Keyword arguments:
geometry -- point/linstring/polygon geojson object
center -- point geojson object
radius -- radius
if(geometry inside radius) return true else false
"""
if geometry['type'] == 'Point':
return point_distance(geometry, center) <= radius
elif geometry['type'] == 'LineString' or geometry['type'] == 'Polygon':
point = {}
# it's enough to check the exterior ring of the Polygon
coordinates = geometry['coordinates'][0] if geometry['type'] == 'Polygon' else geometry['coordinates']
for coordinate in coordinates:
point['coordinates'] = coordinate
if point_distance(point, center) > radius:
return False
return True
def centroid(poly):
"""
get the centroid of polygon
adapted from http://paulbourke.net/geometry/polyarea/javascript.txt
Keyword arguments:
poly -- polygon geojson object
return polygon centroid
"""
f_total = 0
x_total = 0
y_total = 0
# TODO: polygon holes at coordinates[1]
points = poly['coordinates'][0]
j = len(points) - 1
count = len(points)
for i in range(0, count):
p1_x = points[i][1]
p1_y = points[i][0]
p2_x = points[j][1]
p2_y = points[j][0]
f_total = p1_x * p2_y - p2_x * p1_y
x_total += (p1_x + p2_x) * f_total
y_total += (p1_y + p2_y) * f_total
j = i
six_area = area(poly) * 6
return {'type': 'Point', 'coordinates': [y_total / six_area, x_total / six_area]}
def destination_point(point, brng, dist):
"""
Calculate a destination Point base on a base point and a distance
Keyword arguments:
pt -- polygon geojson object
brng -- an angle in degrees
dist -- distance in Kilometer between destination and base point
return destination point object
"""
dist = float(dist) / 6371 # convert dist to angular distance in radians
brng = number2radius(brng)
lon1 = number2radius(point['coordinates'][0])
lat1 = number2radius(point['coordinates'][1])
lat2 = math.asin(math.sin(lat1) * math.cos(dist) +
math.cos(lat1) * math.sin(dist) * math.cos(brng))
lon2 = lon1 + math.atan2(math.sin(brng) * math.sin(dist) *
math.cos(lat1), math.cos(dist) - math.sin(lat1) * math.sin(lat2))
lon2 = (lon2 + 3 * math.pi) % (2 * math.pi) - math.pi # normalise to -180 degree +180 degree
return {'type': 'Point', 'coordinates': [number2degree(lon2), number2degree(lat2)]}
def simplify(source, kink=20):
    """
    Simplify a polyline with the stack-based Douglas-Peucker algorithm.

    Keyword arguments:
    source -- list of point geojson objects ({'coordinates': [lng, lat]})
    kink -- tolerance in meters; kinks deeper than this are kept
            (kink depth is the height of the triangle abc where a-b and
            b-c are two consecutive line segments)

    return list of point geojson objects for the retained vertices

    Rewritten because the original port could not run: it called len() and
    indexed a map() object, assigned into empty lists instead of appending,
    and mixed attribute access (o.coordinates) with dict access (o['lng']).
    """
    coords = [{'lng': pt['coordinates'][0], 'lat': pt['coordinates'][1]}
              for pt in source]
    count = len(coords)
    if count < 3:
        # nothing to simplify for one or two points
        return [{'type': 'Point', 'coordinates': [c['lng'], c['lat']]}
                for c in coords]
    lat_scale = (math.pi / 180.0) * 0.5  # used to shrink lng by cos(avg lat)
    band_sqr = (kink * 360.0 / (2.0 * math.pi * 6378137.0)) ** 2  # tolerance in degrees^2
    index = []  # indices of source points kept in the reduced line
    stack = [(0, count - 1)]  # working sections as (start, end) pairs
    while stack:
        start, end = stack.pop()
        if end - start > 1:  # any intermediate points?
            # find the most deviant intermediate point relative to the
            # chord joining the start & end points
            x12 = coords[end]['lng'] - coords[start]['lng']
            y12 = coords[end]['lat'] - coords[start]['lat']
            if math.fabs(x12) > 180.0:
                x12 = 360.0 - math.fabs(x12)
            x12 *= math.cos(lat_scale * (coords[end]['lat'] + coords[start]['lat']))
            d12 = x12 * x12 + y12 * y12
            sig = start
            max_dev_sqr = -1.0
            for i in range(start + 1, end):
                x13 = coords[i]['lng'] - coords[start]['lng']
                y13 = coords[i]['lat'] - coords[start]['lat']
                if math.fabs(x13) > 180.0:
                    x13 = 360.0 - math.fabs(x13)
                x13 *= math.cos(lat_scale * (coords[i]['lat'] + coords[start]['lat']))
                d13 = x13 * x13 + y13 * y13
                x23 = coords[i]['lng'] - coords[end]['lng']
                y23 = coords[i]['lat'] - coords[end]['lat']
                if math.fabs(x23) > 180.0:
                    x23 = 360.0 - math.fabs(x23)
                x23 *= math.cos(lat_scale * (coords[i]['lat'] + coords[end]['lat']))
                d23 = x23 * x23 + y23 * y23
                if d13 >= d12 + d23:
                    dev_sqr = d23
                elif d23 >= d12 + d13:
                    dev_sqr = d13
                else:
                    # perpendicular distance squared via the triangle area
                    dev_sqr = (x13 * y12 - y13 * x12) ** 2 / d12
                if dev_sqr > max_dev_sqr:
                    sig = i
                    max_dev_sqr = dev_sqr
            if max_dev_sqr < band_sqr:
                # no significant intermediate point: keep the start point
                index.append(start)
            else:
                # split at the deviant point; (start, sig) is processed first
                stack.append((sig, end))
                stack.append((start, sig))
        else:
            # no intermediate points, so transfer current start point
            index.append(start)
    index.append(count - 1)  # always keep the last point
    return [{'type': 'Point', 'coordinates': [coords[i]['lng'], coords[i]['lat']]}
            for i in index]
|
brandonxiang/geojson-python-utils | geojson_utils/geojson_utils.py | centroid | python | def centroid(poly):
f_total = 0
x_total = 0
y_total = 0
# TODO: polygon holes at coordinates[1]
points = poly['coordinates'][0]
j = len(points) - 1
count = len(points)
for i in range(0, count):
p1_x = points[i][1]
p1_y = points[i][0]
p2_x = points[j][1]
p2_y = points[j][0]
f_total = p1_x * p2_y - p2_x * p1_y
x_total += (p1_x + p2_x) * f_total
y_total += (p1_y + p2_y) * f_total
j = i
six_area = area(poly) * 6
return {'type': 'Point', 'coordinates': [y_total / six_area, x_total / six_area]} | get the centroid of polygon
adapted from http://paulbourke.net/geometry/polyarea/javascript.txt
Keyword arguments:
poly -- polygon geojson object
return polygon centroid | train | https://github.com/brandonxiang/geojson-python-utils/blob/33d0dcd5f16e0567b48c0d49fd292a4f1db16b41/geojson_utils/geojson_utils.py#L318-L348 | [
"def area(poly):\n \"\"\"\n calculate the area of polygon\n\n Keyword arguments:\n poly -- polygon geojson object\n\n return polygon area\n \"\"\"\n poly_area = 0\n # TODO: polygon holes at coordinates[1]\n points = poly['coordinates'][0]\n j = len(points) - 1\n count = len(points)\... | import math
def linestrings_intersect(line1, line2):
    """
    Compute every intersection point between two geojson LineStrings.
    segment-pair math ported from
    http://www.kevlindev.com/gui/math/intersection/Intersection.js

    Keyword arguments:
    line1 -- first line geojson object
    line2 -- second line geojson object

    if(line1 intersects with other) return intersect point array else empty array
    """
    crossings = []
    coords_a = line1['coordinates']
    coords_b = line2['coordinates']
    for seg_a in range(len(coords_a) - 1):
        # endpoints of the current segment of line1; index 1 is treated
        # as x and index 0 as y throughout this module
        a1_x, a1_y = coords_a[seg_a][1], coords_a[seg_a][0]
        a2_x, a2_y = coords_a[seg_a + 1][1], coords_a[seg_a + 1][0]
        for seg_b in range(len(coords_b) - 1):
            b1_x, b1_y = coords_b[seg_b][1], coords_b[seg_b][0]
            b2_x, b2_y = coords_b[seg_b + 1][1], coords_b[seg_b + 1][0]
            denom = (b2_y - b1_y) * (a2_x - a1_x) - (b2_x - b1_x) * (a2_y - a1_y)
            if denom == 0:
                # parallel (or degenerate) segments: no single crossing point
                continue
            u_a = ((b2_x - b1_x) * (a1_y - b1_y) -
                   (b2_y - b1_y) * (a1_x - b1_x)) / denom
            u_b = ((a2_x - a1_x) * (a1_y - b1_y) -
                   (a2_y - a1_y) * (a1_x - b1_x)) / denom
            if 0 <= u_a <= 1 and 0 <= u_b <= 1:
                crossings.append({'type': 'Point', 'coordinates': [
                    a1_x + u_a * (a2_x - a1_x), a1_y + u_a * (a2_y - a1_y)]})
    return crossings
def _bbox_around_polycoords(coords):
"""
bounding box
"""
x_all = []
y_all = []
for first in coords[0]:
x_all.append(first[1])
y_all.append(first[0])
return [min(x_all), min(y_all), max(x_all), max(y_all)]
def _point_in_bbox(point, bounds):
"""
valid whether the point is inside the bounding box
"""
return not(point['coordinates'][1] < bounds[0] or point['coordinates'][1] > bounds[2]
or point['coordinates'][0] < bounds[1] or point['coordinates'][0] > bounds[3])
def _pnpoly(x, y, coords):
"""
the algorithm to judge whether the point is located in polygon
reference: https://www.ecse.rpi.edu/~wrf/Research/Short_Notes/pnpoly.html#Explanation
"""
vert = [[0, 0]]
for coord in coords:
for node in coord:
vert.append(node)
vert.append(coord[0])
vert.append([0, 0])
inside = False
i = 0
j = len(vert) - 1
while i < len(vert):
if ((vert[i][0] > y) != (vert[j][0] > y)) and (x < (vert[j][1] - vert[i][1])
* (y - vert[i][0]) / (vert[j][0] - vert[i][0]) + vert[i][1]):
inside = not inside
j = i
i += 1
return inside
def _point_in_polygon(point, coords):
    """
    Core membership test: each entry of *coords* is one polygon's
    coordinate array. A cheap bounding-box rejection pass runs first,
    then the exact ray-casting test over each polygon.
    """
    if not any(_point_in_bbox(point, _bbox_around_polycoords(coord))
               for coord in coords):
        return False
    return any(_pnpoly(point['coordinates'][1], point['coordinates'][0], coord)
               for coord in coords)
def point_in_polygon(point, poly):
    """
    valid whether the point is located in a polygon

    Keyword arguments:
    point -- point geojson object
    poly -- polygon geojson object

    if(point inside poly) return true else false
    """
    if poly['type'] == 'Polygon':
        coords = [poly['coordinates']]
    else:
        coords = poly['coordinates']
    return _point_in_polygon(point, coords)
def point_in_multipolygon(point, multipoly):
    """
    valid whether the point is located in a mulitpolygon (donut polygon is not supported)

    Keyword arguments:
    point -- point geojson object
    multipoly -- multipolygon geojson object

    if(point inside multipoly) return true else false
    """
    if multipoly['type'] == "MultiPolygon":
        coords_array = [multipoly['coordinates']]
    else:
        coords_array = multipoly['coordinates']
    return any(_point_in_polygon(point, coords) for coords in coords_array)
def number2radius(number):
    """
    Convert an angle in degrees to radians.

    Keyword arguments:
    number -- angle in degrees

    return the same angle in radians
    """
    return number * math.pi / 180
def number2degree(number):
    """
    Convert an angle in radians to degrees.

    Keyword arguments:
    number -- angle in radians

    return the same angle in degrees
    """
    return number * 180 / math.pi
def draw_circle(radius_in_meters, center_point, steps=15):
    """
    get a circle shape polygon based on centerPoint and radius

    Keyword arguments:
    radius_in_meters -- circle radius in meters
    center_point -- point geojson object marking the center
    steps -- number of polygon vertices (minimum 15)

    return polygon geojson object approximating the circle
    """
    steps = max(steps, 15)
    # center latitude/longitude in radians
    rad_lat = number2radius(center_point['coordinates'][1])
    rad_lng = number2radius(center_point['coordinates'][0])
    # radius as an angular distance (meters -> km -> radians)
    dist = (radius_in_meters / 1000) / 6371
    ring = []
    for step in range(steps):
        brng = 2 * math.pi * step / steps
        lat = math.asin(math.sin(rad_lat) * math.cos(dist) +
                        math.cos(rad_lat) * math.sin(dist) * math.cos(brng))
        lng = rad_lng + math.atan2(math.sin(brng) * math.sin(dist) * math.cos(rad_lat),
                                   math.cos(dist) - math.sin(rad_lat) * math.sin(lat))
        ring.append([number2degree(lng), number2degree(lat)])
    return {"type": "Polygon", "coordinates": [ring]}
def rectangle_centroid(rectangle):
    """
    get the centroid of the rectangle

    Keyword arguments:
    rectangle -- polygon geojson object

    return centroid
    """
    ring = rectangle['coordinates'][0]
    # opposite corners: ring[0] carries (xmin, ymin), ring[2] (xmax, ymax)
    xmin = ring[0][0]
    ymin = ring[0][1]
    xmax = ring[2][0]
    ymax = ring[2][1]
    return {'type': 'Point',
            'coordinates': [xmin + (xmax - xmin) / 2, ymin + (ymax - ymin) / 2]}
def point_distance(point1, point2):
    """
    Haversine great-circle distance in meters between two points,
    reference http://www.movable-type.co.uk/scripts/latlong.html

    Keyword arguments:
    point1 -- point one geojson object
    point2 -- point two geojson object

    return distance in meters
    """
    lon1, lat1 = point1['coordinates'][0], point1['coordinates'][1]
    lon2, lat2 = point2['coordinates'][0], point2['coordinates'][1]
    d_lat = number2radius(lat2 - lat1)
    d_lon = number2radius(lon2 - lon1)
    h = (math.pow(math.sin(d_lat / 2), 2) +
         math.cos(number2radius(lat1)) * math.cos(number2radius(lat2)) *
         math.pow(math.sin(d_lon / 2), 2))
    arc = 2 * math.atan2(math.sqrt(h), math.sqrt(1 - h))
    return (6371 * arc) * 1000
def point_distance_ellipsode(point1, point2):
    """
    calculate the distance between two points on the ellipsoid,
    linearized around point1 (Earth ellipsoid constants, approx. WGS-84)

    Bug fix: lon2 was previously read from *point1*, so any purely
    east-west separation reported a distance of 0.

    Keyword arguments:
    point1 -- point one geojson object
    point2 -- point two geojson object

    return distance in meters
    """
    a = 6378137           # semi-major axis (meters)
    f = 1 / 298.25722     # flattening
    b = a - a * f         # semi-minor axis
    e = math.sqrt((a * a - b * b) / (a * a))  # first eccentricity
    lon1 = point1['coordinates'][0]
    lat1 = point1['coordinates'][1]
    lon2 = point2['coordinates'][0]  # was point1['coordinates'][0]
    lat2 = point2['coordinates'][1]
    rad_lat1 = lat1 * math.pi / 180
    # meridional radius of curvature at lat1
    M = a * (1 - e * e) * math.pow(1 - math.pow(e * math.sin(rad_lat1), 2), -1.5)
    # prime-vertical radius of curvature at lat1
    N = a / math.pow(1 - math.pow(e * math.sin(rad_lat1), 2), 0.5)
    distance_lat = M * ((lat2 - lat1) * math.pi / 180)
    distance_lon = N * math.cos(rad_lat1) * (lon2 - lon1) * 3600 * \
        math.sin(1 / 3600 * math.pi / 180)
    return math.sqrt(distance_lat * distance_lat + distance_lon * distance_lon)
def geometry_within_radius(geometry, center, radius):
    """
    To valid whether point or linestring or polygon is inside a radius around a center

    Keyword arguments:
    geometry -- point/linstring/polygon geojson object
    center -- point geojson object
    radius -- radius

    if(geometry inside radius) return true else false
    """
    gtype = geometry['type']
    if gtype == 'Point':
        return point_distance(geometry, center) <= radius
    if gtype in ('LineString', 'Polygon'):
        # for a Polygon it's enough to check the exterior ring
        if gtype == 'Polygon':
            coordinates = geometry['coordinates'][0]
        else:
            coordinates = geometry['coordinates']
        return all(point_distance({'coordinates': node}, center) <= radius
                   for node in coordinates)
def area(poly):
    """
    calculate the signed area of a polygon via the shoelace formula over
    the exterior ring (sign depends on winding order)

    Keyword arguments:
    poly -- polygon geojson object

    return polygon area
    """
    # TODO: polygon holes at coordinates[1]
    ring = poly['coordinates'][0]
    prevs = ring[-1:] + ring[:-1]  # predecessor of each node
    total = 0
    for node, prev in zip(ring, prevs):
        # nodes are read as [y, x]
        total += node[1] * prev[0] - node[0] * prev[1]
    return total / 2
def destination_point(point, brng, dist):
    """
    Compute the point reached by travelling *dist* kilometers from *point*
    along the initial bearing *brng*.

    Keyword arguments:
    point -- starting point geojson object
    brng -- bearing in degrees
    dist -- distance in kilometers

    return destination point geojson object
    """
    ang_dist = float(dist) / 6371  # angular distance in radians
    bearing = number2radius(brng)
    lon1 = number2radius(point['coordinates'][0])
    lat1 = number2radius(point['coordinates'][1])
    lat2 = math.asin(math.sin(lat1) * math.cos(ang_dist) +
                     math.cos(lat1) * math.sin(ang_dist) * math.cos(bearing))
    lon2 = lon1 + math.atan2(math.sin(bearing) * math.sin(ang_dist) * math.cos(lat1),
                             math.cos(ang_dist) - math.sin(lat1) * math.sin(lat2))
    # normalise to the -180..+180 degree range
    lon2 = (lon2 + 3 * math.pi) % (2 * math.pi) - math.pi
    return {'type': 'Point', 'coordinates': [number2degree(lon2), number2degree(lat2)]}
def simplify(source, kink=20):
    """
    Simplify a polyline with the stack-based Douglas-Peucker algorithm.

    Keyword arguments:
    source -- list of point geojson objects ({'coordinates': [lng, lat]})
    kink -- tolerance in meters; kinks deeper than this are kept
            (kink depth is the height of the triangle abc where a-b and
            b-c are two consecutive line segments)

    return list of point geojson objects for the retained vertices

    Rewritten because the original port could not run: it called len() and
    indexed a map() object, assigned into empty lists instead of appending,
    and mixed attribute access (o.coordinates) with dict access (o['lng']).
    """
    coords = [{'lng': pt['coordinates'][0], 'lat': pt['coordinates'][1]}
              for pt in source]
    count = len(coords)
    if count < 3:
        # nothing to simplify for one or two points
        return [{'type': 'Point', 'coordinates': [c['lng'], c['lat']]}
                for c in coords]
    lat_scale = (math.pi / 180.0) * 0.5  # used to shrink lng by cos(avg lat)
    band_sqr = (kink * 360.0 / (2.0 * math.pi * 6378137.0)) ** 2  # tolerance in degrees^2
    index = []  # indices of source points kept in the reduced line
    stack = [(0, count - 1)]  # working sections as (start, end) pairs
    while stack:
        start, end = stack.pop()
        if end - start > 1:  # any intermediate points?
            # find the most deviant intermediate point relative to the
            # chord joining the start & end points
            x12 = coords[end]['lng'] - coords[start]['lng']
            y12 = coords[end]['lat'] - coords[start]['lat']
            if math.fabs(x12) > 180.0:
                x12 = 360.0 - math.fabs(x12)
            x12 *= math.cos(lat_scale * (coords[end]['lat'] + coords[start]['lat']))
            d12 = x12 * x12 + y12 * y12
            sig = start
            max_dev_sqr = -1.0
            for i in range(start + 1, end):
                x13 = coords[i]['lng'] - coords[start]['lng']
                y13 = coords[i]['lat'] - coords[start]['lat']
                if math.fabs(x13) > 180.0:
                    x13 = 360.0 - math.fabs(x13)
                x13 *= math.cos(lat_scale * (coords[i]['lat'] + coords[start]['lat']))
                d13 = x13 * x13 + y13 * y13
                x23 = coords[i]['lng'] - coords[end]['lng']
                y23 = coords[i]['lat'] - coords[end]['lat']
                if math.fabs(x23) > 180.0:
                    x23 = 360.0 - math.fabs(x23)
                x23 *= math.cos(lat_scale * (coords[i]['lat'] + coords[end]['lat']))
                d23 = x23 * x23 + y23 * y23
                if d13 >= d12 + d23:
                    dev_sqr = d23
                elif d23 >= d12 + d13:
                    dev_sqr = d13
                else:
                    # perpendicular distance squared via the triangle area
                    dev_sqr = (x13 * y12 - y13 * x12) ** 2 / d12
                if dev_sqr > max_dev_sqr:
                    sig = i
                    max_dev_sqr = dev_sqr
            if max_dev_sqr < band_sqr:
                # no significant intermediate point: keep the start point
                index.append(start)
            else:
                # split at the deviant point; (start, sig) is processed first
                stack.append((sig, end))
                stack.append((start, sig))
        else:
            # no intermediate points, so transfer current start point
            index.append(start)
    index.append(count - 1)  # always keep the last point
    return [{'type': 'Point', 'coordinates': [coords[i]['lng'], coords[i]['lat']]}
            for i in index]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.