repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
drericstrong/pyedna | pyedna/ezdna.py | GetHist | python | def GetHist(tag_name, start_time, end_time, period=5, mode="raw",
desc_as_label=False, label=None, high_speed=False, utc=False):
"""
Retrieves data from eDNA history for a given tag.
:param tag_name: fully-qualified (site.service.tag) eDNA tag
:param start_time: must be in format mm/dd/yy hh:mm:ss
:param end_time: must be in format mm/dd/yy hh:mm:ss
:param period: specify the number of seconds for the pull interval
:param mode: "raw", "snap", "avg", "interp", "max", "min"
See eDNA documentation for more information.
:param desc_as_label: use the tag description as the column name instead
of the full tag
:param label: supply a custom label to use as the DataFrame column name
:param high_speed: if True, pull millisecond data
:param utc: if True, use the integer time format instead of DateTime
:return: a pandas DataFrame with timestamp, value, and status
"""
# Check if the point even exists
if not DoesIDExist(tag_name):
warnings.warn("WARNING- " + tag_name + " does not exist or " +
"connection was dropped. Try again if tag does exist.")
return pd.DataFrame()
# Define all required variables in the correct ctypes format
szPoint = c_char_p(tag_name.encode('utf-8'))
tStart = c_long(StringToUTCTime(start_time))
tEnd = c_long(StringToUTCTime(end_time))
tPeriod = c_long(period)
pulKey = c_ulong(0)
# Initialize the data pull using the specified pulKey, which is an
# identifier that tells eDNA which data pull is occurring
mode = mode.lower().strip()
if not high_speed:
if mode == "avg":
nRet = dna_dll.DnaGetHistAvgUTC(szPoint, tStart, tEnd, tPeriod, byref(pulKey))
if mode == "interp":
nRet = dna_dll.DnaGetHistInterpUTC(szPoint, tStart, tEnd, tPeriod, byref(pulKey))
if mode == "min":
nRet = dna_dll.DnaGetHistMinUTC(szPoint, tStart, tEnd, tPeriod, byref(pulKey))
if mode == "max":
nRet = dna_dll.DnaGetHistMaxUTC(szPoint, tStart, tEnd, tPeriod, byref(pulKey))
if mode == "snap":
nRet = dna_dll.DnaGetHistSnapUTC(szPoint, tStart, tEnd, tPeriod, byref(pulKey))
else:
nRet = dna_dll.DnaGetHistRawUTC(szPoint, tStart, tEnd, byref(pulKey))
time_, val, stat = _GetNextHistSmallUTC(pulKey, nRet)
else:
nStartMillis = c_ushort(0)
nEndMillis = c_ushort(0)
nRet = dna_dll.DnaGetHSHistRawUTC(szPoint, tStart, nStartMillis,
tEnd, nEndMillis, byref(pulKey))
time_, val, stat = _GetNextHSHistUTC(pulKey, nRet)
# The history request must be cancelled to free up network resources
dna_dll.DnaCancelHistRequest(pulKey)
# To construct the pandas DataFrame, the tag name will be used as the
# column name, and the index (which is in the strange eDNA format) must be
# converted to an actual DateTime
d = {tag_name + ' Status': stat, tag_name: val}
df = pd.DataFrame(data=d, index=time_)
if not utc:
if not high_speed:
df.index = pd.to_datetime(df.index, unit="s")
else:
df.index = pd.to_datetime(df.index, unit="ms")
if df.empty:
warnings.warn('WARNING- No data retrieved for ' + tag_name + '. ' +
'Check eDNA connection, ensure that the start time is ' +
'not later than the end time, verify that the ' +
'DateTime formatting matches eDNA requirements, and ' +
'check that data exists in the query time period.')
# Check if the user would rather use the description as the column name
if desc_as_label or label:
if label:
new_label = label
else:
new_label = _GetLabel(tag_name)
df.rename(inplace=True, columns={tag_name: new_label,
tag_name + " Status": new_label + " Status"})
return df | Retrieves data from eDNA history for a given tag.
:param tag_name: fully-qualified (site.service.tag) eDNA tag
:param start_time: must be in format mm/dd/yy hh:mm:ss
:param end_time: must be in format mm/dd/yy hh:mm:ss
:param period: specify the number of seconds for the pull interval
:param mode: "raw", "snap", "avg", "interp", "max", "min"
See eDNA documentation for more information.
:param desc_as_label: use the tag description as the column name instead
of the full tag
:param label: supply a custom label to use as the DataFrame column name
:param high_speed: if True, pull millisecond data
:param utc: if True, use the integer time format instead of DateTime
:return: a pandas DataFrame with timestamp, value, and status | train | https://github.com/drericstrong/pyedna/blob/b8f8f52def4f26bb4f3a993ce3400769518385f6/pyedna/ezdna.py#L226-L309 | [
"def DoesIDExist(tag_name):\n \"\"\"\n Determines if a fully-qualified site.service.tag eDNA tag exists\n in any of the connected services.\n\n :param tag_name: fully-qualified (site.service.tag) eDNA tag\n :return: true if the point exists, false if the point does not exist\n\n Example:\n\n >>> DoesIDExist(\"Site.Service.Tag\")\n\n \"\"\"\n # the eDNA API requires that the tag_name be specified in a binary format,\n # and the ctypes library must be used to create a C++ variable type.\n szPoint = c_char_p(tag_name.encode('utf-8'))\n result = bool(dna_dll.DoesIdExist(szPoint))\n return result\n",
"def StringToUTCTime(time_string):\n \"\"\"\n Turns a DateTime string into UTC time.\n\n :param time_string: Must be the format \"MM/dd/yy hh:mm:ss\"\n :return: an integer representing the UTC int format\n \"\"\"\n szTime = c_char_p(time_string.encode('utf-8'))\n res = dna_dll.StringToUTCTime(szTime)\n return res\n",
"def _GetLabel(tag_name):\n # This function tries to get the tag description to use as the label for\n # the variable in the pandas DataFrame. It removes any special characters\n # and trims whitespace before and after. If the label is blank, the\n # tag name will be returned again instead.\n label = GetTagDescription(tag_name)\n if label:\n return label\n else:\n return tag_name\n"
] | # -*- coding: utf-8 -*-
"""
pyedna.ezdna
--------------
This module contains "easy" versions of common functions from the eDNA
C++ dll. Obtain a legal copy of the C++ eDNA dll for use.
:copyright: (c) 2017 Eric Strong.
:license: Refer to LICENSE.txt for more information.
"""
# Note- all functions are in CamelCase to match the original eDNA function
# names, even though this does not follow PEP 8.
import re
import os
import numba
import warnings
import numpy as np
import pandas as pd
from unittest.mock import Mock
from ctypes import cdll, byref, create_string_buffer
from ctypes import c_char_p, c_double, c_ushort, c_long, c_ulong
def _mock_edna():
# This function will mock all the methods that were used in the dna_dll.
# It's necessary so that documentation can be automatically created.
dna_dll = Mock()
attrs = {'DnaGetHistAvgUTC.return_value': c_ulong(1),
'DnaGetHistInterpUTC.return_value': c_ulong(1),
'DnaGetHistMinUTC.return_value': c_ulong(1),
'DnaGetHistMaxUTC.return_value': c_ulong(1),
'DnaGetHistSnapUTC.return_value': c_ulong(1),
'DnaGetHistRawUTC.return_value': c_ulong(1),
'DoesIdExist.return_value': c_ulong(1),
'DnaGetHSHistRawUTC.return_value': c_ulong(1),
'DnaGetNextHSHistUTC.return_value': c_ulong(1),
'DnaGetPointEntry.return_value': c_ulong(1),
'DnaGetNextPointEntry.return_value': c_ulong(1),
'DNAGetRTFull.return_value': c_ulong(1),
'DnaSelectPoint.return_value': c_ulong(1),
'StringToUTCTime.return_value': 1,
'DnaGetServiceEntry.return_value': c_ulong(1),
'DnaGetNextServiceEntry.return_value': c_ulong(1),
'DnaHistAppendValues.return_value': c_ulong(1),
'DnaHistUpdateInsertValues.return_value': c_ulong(1),
'DnaCancelHistRequest.return_value': None,
'DnaGetNextHistSmallUTC.return_value': c_ulong(1)}
dna_dll.configure_mock(**attrs)
return dna_dll
# This code should execute at the beginning of the module import, because
# all of the functions in this module require the dna_dll library to be
# loaded. See "LoadDll" if not in default location.
# If the dll is missing, a Mock object is substituted so the module still
# imports; every API call will then fail until LoadDll is called manually.
default_location = "C:\\Program Files (x86)\\eDNA\\EzDnaApi64.dll"
if os.path.isfile(default_location):
    dna_dll = cdll.LoadLibrary(default_location)
else:
    warnings.warn("ERROR- no eDNA dll detected at " +
                  "C:\\Program Files (x86)\\eDNA\\EzDnaApi64.dll" +
                  " . Please manually load dll using the LoadDll function. " +
                  "Mocking dll, but all functions will fail until " +
                  "dll is manually loaded...")
    dna_dll = _mock_edna()
# If the EzDnaApi file is not in the default location, the user must explicitly
# load it using the LoadDll function.
def LoadDll(location):
    """
    Loads the eDNA C++ API dll from a user-supplied path. This is required
    whenever EzDnaApi64.dll is not in the default install location
    (C:\Program Files (x86)\eDNA\EzDnaApi64.dll); none of the functions in
    this module will work until a real dll has been loaded.

    :param location: the full location of EzDnaApi64.dll, including filename
    """
    # Guard clause: fail fast with a clear message if the path is wrong.
    if not os.path.isfile(location):
        raise Exception("ERROR- file does not exist at " + location)
    global dna_dll
    dna_dll = cdll.LoadLibrary(location)
def _format_str(text):
# Only allows a-z, 0-9, ., _, :, /, -, and spaces
if type(text) is str:
formatted_text = re.sub('[^-._:/\sA-Za-z0-9]+', '', text).strip()
return formatted_text
else:
return text
def DoesIDExist(tag_name):
    """
    Determines if a fully-qualified site.service.tag eDNA tag exists
    in any of the connected services.

    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :return: true if the point exists, false if the point does not exist

    Example:

    >>> DoesIDExist("Site.Service.Tag")

    """
    # The eDNA API expects a C-style byte string, so encode the tag and
    # wrap it in a ctypes char pointer before calling into the dll.
    encoded_tag = c_char_p(tag_name.encode('utf-8'))
    return bool(dna_dll.DoesIdExist(encoded_tag))
def GetHistAvg(tag_name, start_time, end_time, period,
               desc_as_label=False, label=None):
    """
    Pulls eDNA history for a tag, averaging the stored data over fixed
    intervals of "period" seconds. Thin convenience wrapper around GetHist.

    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :param start_time: must be in format mm/dd/yy hh:mm:ss
    :param end_time: must be in format mm/dd/yy hh:mm:ss
    :param period: in units of seconds (e.g. 10)
    :param desc_as_label: use the tag description as the column name instead
        of the full tag
    :param label: supply a custom label to use as the DataFrame column name
    :return: a pandas DataFrame with timestamp, value, and status
    """
    opts = dict(mode="avg", period=period,
                desc_as_label=desc_as_label, label=label)
    return GetHist(tag_name, start_time, end_time, **opts)
def GetHistInterp(tag_name, start_time, end_time, period,
                  desc_as_label=False, label=None):
    """
    Pulls eDNA history for a tag, linearly interpolating the stored data
    onto fixed intervals of "period" seconds. Thin convenience wrapper
    around GetHist.

    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :param start_time: must be in format mm/dd/yy hh:mm:ss
    :param end_time: must be in format mm/dd/yy hh:mm:ss
    :param period: in units of seconds (e.g. 10)
    :param desc_as_label: use the tag description as the column name instead
        of the full tag
    :param label: supply a custom label to use as the DataFrame column name
    :return: a pandas DataFrame with timestamp, value, and status
    """
    opts = dict(mode="interp", period=period,
                desc_as_label=desc_as_label, label=label)
    return GetHist(tag_name, start_time, end_time, **opts)
def GetHistMax(tag_name, start_time, end_time, period,
               desc_as_label=False, label=None):
    """
    Pulls eDNA history for a tag, taking the maximum of the stored data
    over fixed intervals of "period" seconds. Thin convenience wrapper
    around GetHist.

    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :param start_time: must be in format mm/dd/yy hh:mm:ss
    :param end_time: must be in format mm/dd/yy hh:mm:ss
    :param period: in units of seconds (e.g. 10)
    :param desc_as_label: use the tag description as the column name instead
        of the full tag
    :param label: supply a custom label to use as the DataFrame column name
    :return: a pandas DataFrame with timestamp, value, and status
    """
    opts = dict(mode="max", period=period,
                desc_as_label=desc_as_label, label=label)
    return GetHist(tag_name, start_time, end_time, **opts)
def GetHistMin(tag_name, start_time, end_time, period,
               desc_as_label=False, label=None):
    """
    Pulls eDNA history for a tag, taking the minimum of the stored data
    over fixed intervals of "period" seconds. Thin convenience wrapper
    around GetHist.

    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :param start_time: must be in format mm/dd/yy hh:mm:ss
    :param end_time: must be in format mm/dd/yy hh:mm:ss
    :param period: in units of seconds (e.g. 10)
    :param desc_as_label: use the tag description as the column name instead
        of the full tag
    :param label: supply a custom label to use as the DataFrame column name
    :return: a pandas DataFrame with timestamp, value, and status
    """
    opts = dict(mode="min", period=period,
                desc_as_label=desc_as_label, label=label)
    return GetHist(tag_name, start_time, end_time, **opts)
def GetHistRaw(tag_name, start_time, end_time, high_speed=False,
               desc_as_label=False, label=None):
    """
    Retrieves raw data from eDNA history for a given tag.

    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :param start_time: must be in format mm/dd/yy hh:mm:ss
    :param end_time: must be in format mm/dd/yy hh:mm:ss
    :param high_speed: true = pull milliseconds
    :param desc_as_label: use the tag description as the column name instead
        of the full tag
    :param label: supply a custom label to use as the DataFrame column name
    :return: a pandas DataFrame with timestamp, value, and status
    """
    # BUG FIX: high_speed was accepted but never forwarded to GetHist, so
    # millisecond pulls were silently downgraded to standard-resolution
    # raw pulls. It is now passed through.
    return GetHist(tag_name, start_time, end_time, mode="raw",
                   high_speed=high_speed,
                   desc_as_label=desc_as_label, label=label)
def GetHistSnap(tag_name, start_time, end_time, period,
                desc_as_label=False, label=None):
    """
    Pulls eDNA history for a tag, snapping the last known value onto fixed
    intervals of "period" seconds. Thin convenience wrapper around GetHist.

    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :param start_time: must be in format mm/dd/yy hh:mm:ss
    :param end_time: must be in format mm/dd/yy hh:mm:ss
    :param period: in units of seconds (e.g. 10)
    :param desc_as_label: use the tag description as the column name instead
        of the full tag
    :param label: supply a custom label to use as the DataFrame column name
    :return: a pandas DataFrame with timestamp, value, and status
    """
    opts = dict(mode="snap", period=period,
                desc_as_label=desc_as_label, label=label)
    return GetHist(tag_name, start_time, end_time, **opts)
def GetHist(tag_name, start_time, end_time, period=5, mode="raw",
            desc_as_label=False, label=None, high_speed=False, utc=False):
    """
    Retrieves data from eDNA history for a given tag.

    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :param start_time: must be in format mm/dd/yy hh:mm:ss
    :param end_time: must be in format mm/dd/yy hh:mm:ss
    :param period: specify the number of seconds for the pull interval
    :param mode: "raw", "snap", "avg", "interp", "max", "min"
        See eDNA documentation for more information. Any unrecognized mode
        falls back to a raw pull (matching the historical behavior for
        mode="raw").
    :param desc_as_label: use the tag description as the column name instead
        of the full tag
    :param label: supply a custom label to use as the DataFrame column name
    :param high_speed: if True, pull millisecond data
    :param utc: if True, use the integer time format instead of DateTime
    :return: a pandas DataFrame with timestamp, value, and status
    """
    # Check if the point even exists
    if not DoesIDExist(tag_name):
        warnings.warn("WARNING- " + tag_name + " does not exist or " +
                      "connection was dropped. Try again if tag does exist.")
        return pd.DataFrame()
    # Define all required variables in the correct ctypes format
    szPoint = c_char_p(tag_name.encode('utf-8'))
    tStart = c_long(StringToUTCTime(start_time))
    tEnd = c_long(StringToUTCTime(end_time))
    tPeriod = c_long(period)
    pulKey = c_ulong(0)
    # Initialize the data pull using the specified pulKey, which is an
    # identifier that tells eDNA which data pull is occurring.
    # BUG FIX: the original dispatch used a series of independent "if"
    # statements with a single trailing "else", so every mode other than
    # "snap" ALSO issued a raw-data request, overwriting nRet/pulKey and
    # returning raw data for avg/interp/min/max. The elif chain below
    # initializes exactly one history request per call.
    mode = mode.lower().strip()
    if not high_speed:
        if mode == "avg":
            nRet = dna_dll.DnaGetHistAvgUTC(szPoint, tStart, tEnd, tPeriod, byref(pulKey))
        elif mode == "interp":
            nRet = dna_dll.DnaGetHistInterpUTC(szPoint, tStart, tEnd, tPeriod, byref(pulKey))
        elif mode == "min":
            nRet = dna_dll.DnaGetHistMinUTC(szPoint, tStart, tEnd, tPeriod, byref(pulKey))
        elif mode == "max":
            nRet = dna_dll.DnaGetHistMaxUTC(szPoint, tStart, tEnd, tPeriod, byref(pulKey))
        elif mode == "snap":
            nRet = dna_dll.DnaGetHistSnapUTC(szPoint, tStart, tEnd, tPeriod, byref(pulKey))
        else:
            # "raw" (and any unrecognized mode) ignores the period
            nRet = dna_dll.DnaGetHistRawUTC(szPoint, tStart, tEnd, byref(pulKey))
        time_, val, stat = _GetNextHistSmallUTC(pulKey, nRet)
    else:
        # High-speed pulls always use the raw call, with millisecond fields
        nStartMillis = c_ushort(0)
        nEndMillis = c_ushort(0)
        nRet = dna_dll.DnaGetHSHistRawUTC(szPoint, tStart, nStartMillis,
                                          tEnd, nEndMillis, byref(pulKey))
        time_, val, stat = _GetNextHSHistUTC(pulKey, nRet)
    # The history request must be cancelled to free up network resources
    dna_dll.DnaCancelHistRequest(pulKey)
    # To construct the pandas DataFrame, the tag name will be used as the
    # column name, and the index (which is in the strange eDNA format) must be
    # converted to an actual DateTime
    d = {tag_name + ' Status': stat, tag_name: val}
    df = pd.DataFrame(data=d, index=time_)
    if not utc:
        # Standard pulls are indexed in whole seconds, high-speed in ms
        if not high_speed:
            df.index = pd.to_datetime(df.index, unit="s")
        else:
            df.index = pd.to_datetime(df.index, unit="ms")
    if df.empty:
        warnings.warn('WARNING- No data retrieved for ' + tag_name + '. ' +
                      'Check eDNA connection, ensure that the start time is ' +
                      'not later than the end time, verify that the ' +
                      'DateTime formatting matches eDNA requirements, and ' +
                      'check that data exists in the query time period.')
    # Check if the user would rather use the description as the column name
    if desc_as_label or label:
        if label:
            new_label = label
        else:
            new_label = _GetLabel(tag_name)
        df.rename(inplace=True, columns={tag_name: new_label,
                                         tag_name + " Status": new_label + " Status"})
    return df
@numba.jit
def _GetNextHistSmallUTC(pulKey, nRet):
    # This is a base function that iterates over a predefined history call,
    # which may be raw, snap, max, min, etc. One sample is pulled per dll
    # call until the service reports completion or an error.
    #
    # pulKey: c_ulong request key returned by the initializing DnaGetHist*
    #   call. nRet: that call's return code; note it is immediately
    #   overwritten below, so a failed initialization is not detected here.
    # Returns (time_, val, stat) numpy arrays: UTC-second timestamps,
    # point values, and status codes.
    # NOTE(review): np.append reallocates on every sample (quadratic for
    # long pulls); accumulating into lists and converting once would be
    # faster - confirm numba compatibility before changing.
    pdValue, ptTime, pusStatus = c_double(-9999), c_long(-9999), c_ushort(0)
    refVal, refTime, refStat = byref(pdValue), byref(ptTime), byref(pusStatus)
    val = np.empty(0)
    time_ = np.empty(0)
    stat = np.empty(0)
    # Once nRet is not zero, the function was terminated, either due to an
    # error or due to the end of the data period.
    nRet = dna_dll.DnaGetNextHistSmallUTC(pulKey, refVal, refTime, refStat)
    while nRet == 0:
        val = np.append(val, pdValue.value)
        time_ = np.append(time_, ptTime.value)
        stat = np.append(stat, pusStatus.value)
        nRet = dna_dll.DnaGetNextHistSmallUTC(pulKey, refVal, refTime, refStat)
    return time_, val, stat
@numba.jit
def _GetNextHSHistUTC(pulKey, nRet):
    # This is a base function that iterates over a predefined history call,
    # which may be raw, snap, max, min, etc. High-speed variant: timestamps
    # are combined into milliseconds (seconds * 1000 + millis).
    #
    # pulKey: c_ulong request key from DnaGetHSHistRawUTC. nRet: that
    # call's return code (overwritten below, as in _GetNextHistSmallUTC).
    # Returns (time_, val, stat) numpy arrays; the status array is
    # hard-coded to 3 for every sample (the szStatus buffer returned by
    # the dll is read but not parsed here).
    pdValue, ptTime, pnMillis = c_double(-9999), c_long(-9999), c_ushort(0)
    szStatus, nStatus = create_string_buffer(20), c_ushort(20)
    refVal, refTime, refMillis = byref(pdValue), byref(ptTime), byref(pnMillis)
    refStatus = byref(szStatus)
    val = np.empty(0)
    time_ = np.empty(0)
    stat = np.empty(0)
    # Once nRet is not zero, the function was terminated, either due to an
    # error or due to the end of the data period.
    nRet = dna_dll.DnaGetNextHSHistUTC(pulKey, refVal, refTime, refMillis,
                                       refStatus, nStatus)
    while nRet == 0:
        val = np.append(val, pdValue.value)
        time_ = np.append(time_, ptTime.value * 1000 + pnMillis.value)
        stat = np.append(stat, 3)
        nRet = dna_dll.DnaGetNextHSHistUTC(pulKey, refVal, refTime, refMillis,
                                           refStatus, nStatus)
    return time_, val, stat
def _GetLabel(tag_name):
    # Resolves the DataFrame column label for a tag: prefer the tag's
    # description from the point service, but fall back to the tag name
    # itself whenever the description is blank or unavailable (None).
    return GetTagDescription(tag_name) or tag_name
def GetMultipleTags(tag_list, start_time, end_time, sampling_rate=None,
                    fill_limit=99999, verify_time=False, desc_as_label=False,
                    utc=False):
    """
    Retrieves raw data from eDNA history for multiple tags, merging them into
    a single DataFrame, and resampling the data according to the specified
    sampling_rate.

    :param tag_list: a list of fully-qualified (site.service.tag) eDNA tags
    :param start_time: must be in format mm/dd/yy hh:mm:ss
    :param end_time: must be in format mm/dd/yy hh:mm:ss
    :param sampling_rate: in units of seconds
    :param fill_limit: in units of data points
    :param verify_time: verify that the time is not before or after the query
    :param desc_as_label: use the tag description as the column name instead
        of the full tag
    :param utc: if True, use the integer time format instead of DateTime
    :return: a pandas DataFrame with timestamp and values
    """
    # Since we are pulling data from multiple tags, let's iterate over each
    # one. For this case, we only want to pull data using the "raw" method,
    # which will obtain all data as it is actually stored in the historian.
    dfs = []
    columns_names = []
    for tag in tag_list:
        df = GetHist(tag, start_time, end_time, utc=utc)
        if not df.empty:
            # Sometimes a duplicate index/value pair is retrieved from
            # eDNA, which will cause the concat to fail if not removed
            # df.drop_duplicates(inplace=True)
            df = df[~df.index.duplicated(keep='first')]
            # If the user wants to use descriptions as labels, we need to
            # ensure that only unique labels are used; duplicates receive a
            # numeric suffix ("Desc", "Desc2", "Desc3", ...)
            label = tag
            if desc_as_label:
                orig_label = _GetLabel(tag)
                label = orig_label
                rename_number = 2
                while label in columns_names:
                    label = orig_label + str(rename_number)
                    rename_number += 1
            columns_names.append(label)
            df.rename(columns={tag: label}, inplace=True)
            # Add the DataFrame to the list, to be concatenated later.
            # Only the value column is kept; the "... Status" column that
            # GetHist produced is dropped here.
            dfs.append(pd.DataFrame(df[label]))
    # Next, we concatenate all the DataFrames using an outer join (default).
    # Verify integrity is slow, but it ensures that the concatenation
    # worked correctly.
    if dfs:
        merged_df = pd.concat(dfs, axis=1, verify_integrity=True)
        merged_df = merged_df.fillna(method="ffill", limit=fill_limit)
    else:
        warnings.warn('WARNING- No data retrieved for any tags. ' +
                      'Check eDNA connection, ensure that the start time is ' +
                      'not later than the end time, verify that the ' +
                      'DateTime formatting matches eDNA requirements, and ' +
                      'check that data exists in the query time period.')
        return pd.DataFrame()
    # eDNA sometimes pulls data too early or too far- let's filter out all
    # the data that is not within our original criteria.
    # NOTE(review): this comparison presumably requires a DateTime index,
    # so verify_time=True with utc=True (integer index) looks unsupported -
    # confirm intended behavior.
    if verify_time:
        start_np = pd.to_datetime(start_time)
        end_np = pd.to_datetime(end_time)
        mask = (merged_df.index > start_np) & (merged_df.index <= end_np)
        merged_df = merged_df.loc[mask]
    # Finally, we resample the data at the rate requested by the user.
    if sampling_rate:
        sampling_string = str(sampling_rate) + "S"
        merged_df = merged_df.resample(sampling_string).fillna(
            method="ffill", limit=fill_limit)
    return merged_df
def _FormatPoints(szPoint, pdValue, szTime, szStatus, szDesc, szUnits):
    # Returns an array of properly-formatted points from the GetPoints function.
    # Each ctypes buffer is decoded (non-UTF-8 bytes ignored) and sanitized
    # via _format_str; pdValue is a c_double, so its value passes through
    # _FormatPoints unchanged. When the tag buffer is blank the function
    # implicitly returns None, which GetPoints uses to skip empty entries.
    tag = _format_str(szPoint.value.decode(errors='ignore'))
    value = pdValue.value
    time_ = _format_str(szTime.value.decode(errors='ignore'))
    status = _format_str(szStatus.value.decode(errors='ignore'))
    desc = _format_str(szDesc.value.decode(errors='ignore'))
    units = _format_str(szUnits.value.decode(errors='ignore'))
    if szPoint.value.strip():
        return [tag, value, time_, status, desc, units]
def GetPoints(edna_service):
    """
    Obtains all the points in the edna_service, including real-time values.

    :param edna_service: The full Site.Service name of the eDNA service.
    :return: A pandas DataFrame of points in the form [Tag, Value, Time,
        Status, Description, Units]; empty if the service has no points.
    """
    # Define all required variables in the correct ctypes format.
    # Two sets of buffers are used: one for the initial DnaGetPointEntry
    # call and a second set for the DnaGetNextPointEntry iteration.
    szServiceName = c_char_p(edna_service.encode('utf-8'))
    nStarting, pulKey, pdValue = c_ushort(0), c_ulong(0), c_double(-9999)
    szPoint, szTime = create_string_buffer(30), create_string_buffer(30)
    szStatus, szDesc = create_string_buffer(20), create_string_buffer(90)
    szUnits = create_string_buffer(20)
    szPoint2, szTime2 = create_string_buffer(30), create_string_buffer(30)
    szStatus2, szDesc2 = create_string_buffer(20), create_string_buffer(90)
    szUnits2, pdValue2 = create_string_buffer(20), c_double(-9999)
    nPoint, nTime, nStatus = c_ushort(30), c_ushort(30), c_ushort(20)
    nDesc, nUnits = c_ushort(90), c_ushort(20)
    # Call the eDNA function. nRet is zero if the function is successful.
    points = []
    nRet = dna_dll.DnaGetPointEntry(szServiceName, nStarting, byref(pulKey),
        byref(szPoint), nPoint, byref(pdValue), byref(szTime), nTime,
        byref(szStatus), nStatus, byref(szDesc), nDesc, byref(szUnits), nUnits)
    tag = _FormatPoints(szPoint, pdValue, szTime, szStatus, szDesc, szUnits)
    if tag:
        points.append(tag)
    # Iterate across all the returned services
    while nRet == 0:
        nRet = dna_dll.DnaGetNextPointEntry(pulKey, byref(szPoint2), nPoint,
            byref(pdValue2), byref(szTime2), nTime, byref(szStatus2), nStatus,
            byref(szDesc2), nDesc, byref(szUnits2), nUnits)
        # We want to ensure only UTF-8 characters are returned. Ignoring
        # characters is slightly unsafe, but they should only occur in the
        # units or description, so it's not a huge issue.
        tag = _FormatPoints(szPoint2, pdValue2, szTime2, szStatus2, szDesc2,
                            szUnits2)
        if tag:
            points.append(tag)
    # If no results were returned, raise a warning
    df = pd.DataFrame()
    if points:
        df = pd.DataFrame(points, columns=["Tag", "Value", "Time", "Status",
                                           "Description", "Units"])
    else:
        warnings.warn("WARNING- No points were returned. Check that the " +
                      "service exists and contains points.")
    return df
def GetRTFull(tag_name):
    """
    Gets current information about a point configured in a real-time
    eDNA service, including current value, time, status, description,
    and units.

    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :return: tuple of: value, time, status, statusint, description, units;
        or None if the tag does not exist or the API call fails
    """
    # Check if the point even exists
    if not DoesIDExist(tag_name):
        warnings.warn("WARNING- " + tag_name + " does not exist or " +
                      "connection was dropped. Try again if tag does exist.")
        return None
    # Define all required variables in the correct ctypes format
    szPoint = c_char_p(tag_name.encode('utf-8'))
    pdValue, ptTime = c_double(-9999), c_long(-9999)
    szValue, szTime = create_string_buffer(20), create_string_buffer(20)
    szStatus, szDesc = create_string_buffer(20), create_string_buffer(20)
    szUnits = create_string_buffer(20)
    nValue, nTime, nStatus = c_ushort(20), c_ushort(20), c_ushort(20)
    pusStatus, nDesc, nUnits = c_ushort(0), c_ushort(0), c_ushort(0)
    # Call the eDNA function. nRet is zero if the function is successful
    nRet = dna_dll.DNAGetRTFull(szPoint, byref(pdValue), byref(szValue),
        nValue, byref(ptTime), byref(szTime), nTime, byref(pusStatus),
        byref(szStatus), nStatus, byref(szDesc), nDesc, byref(szUnits), nUnits)
    # Check to make sure the function returned correctly. If not, return None
    if nRet == 0:
        return ([pdValue.value, szTime.value.decode('utf-8'),
                 szStatus.value.decode('utf-8'), pusStatus.value,
                 szDesc.value.decode('utf-8'), szUnits.value.decode('utf-8')])
    else:
        warnings.warn("WARNING- eDNA API failed with code " + str(nRet))
        return None
def _FormatServices(szSvcName, szSvcDesc, szSvcType, szSvcStat):
    # Returns an array of properly-formatted services from the
    # GetServices function. Each ctypes buffer is decoded (non-UTF-8 bytes
    # ignored) and sanitized via _format_str. When the service name is
    # blank the function implicitly returns None, which GetServices uses
    # to skip empty entries.
    name = _format_str(szSvcName.value.decode(errors='ignore'))
    desc = _format_str(szSvcDesc.value.decode(errors='ignore'))
    type_ = _format_str(szSvcType.value.decode(errors='ignore'))
    status = _format_str(szSvcStat.value.decode(errors='ignore'))
    if name:
        return [name, desc, type_, status]
def GetServices():
    """
    Obtains all the connected eDNA services.

    :return: A pandas DataFrame of connected eDNA services in the form [Name,
        Description, Type, Status]; empty if no services are connected.
    """
    # Define all required variables in the correct ctypes format.
    # Empty type/start-name filters request every available service; two
    # buffer sets are used, one for the initial DnaGetServiceEntry call and
    # one for the DnaGetNextServiceEntry iteration.
    pulKey = c_ulong(0)
    szType = c_char_p("".encode('utf-8'))
    szStartSvcName = c_char_p("".encode('utf-8'))
    szSvcName, szSvcDesc = create_string_buffer(30), create_string_buffer(90)
    szSvcType, szSvcStat = create_string_buffer(30), create_string_buffer(30)
    szSvcName2, szSvcDesc2 = create_string_buffer(30), create_string_buffer(90)
    szSvcType2, szSvcStat2 = create_string_buffer(30), create_string_buffer(30)
    nSvcName, nSvcDesc = c_ushort(30), c_ushort(90)
    nSvcType, nSvcStat = c_ushort(30), c_ushort(30)
    # Call the eDNA function. nRet is zero if the function is successful.
    services = []
    nRet = dna_dll.DnaGetServiceEntry(szType, szStartSvcName, byref(pulKey),
        byref(szSvcName), nSvcName, byref(szSvcDesc), nSvcDesc,
        byref(szSvcType), nSvcType, byref(szSvcStat), nSvcStat)
    serv = _FormatServices(szSvcName, szSvcDesc, szSvcType, szSvcStat)
    if serv:
        services.append(serv)
    # Iterate across all the returned services
    while nRet == 0:
        nRet = dna_dll.DnaGetNextServiceEntry(pulKey,
            byref(szSvcName2), nSvcName, byref(szSvcDesc2), nSvcDesc,
            byref(szSvcType2), nSvcType, byref(szSvcStat2), nSvcStat)
        # We want to ensure only UTF-8 characters are returned. Ignoring
        # characters is slightly unsafe, but they should only occur in the
        # units or description, so it's not a huge issue.
        serv = _FormatServices(szSvcName2, szSvcDesc2, szSvcType2, szSvcStat2)
        if serv:
            services.append(serv)
    # If no results were returned, raise a warning
    df = pd.DataFrame()
    if services:
        df = pd.DataFrame(services, columns=["Name", "Description", "Type",
                                             "Status"])
    else:
        warnings.warn("WARNING- No connected eDNA services detected. Check " +
                      "your DNASys.ini file and your network connection.")
    return df
def GetTagDescription(tag_name):
    """
    Gets the current description of a point configured in a real-time eDNA
    service.

    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :return: tag description; the tag name itself when the description is
        blank or the tag is not fully qualified; None when the tag does not
        exist or is not found in its service
    """
    # Check if the point even exists
    if not DoesIDExist(tag_name):
        warnings.warn("WARNING- " + tag_name + " does not exist or " +
                      "connection was dropped. Try again if tag does exist.")
        return None
    # To get the point information for the service, we need the Site.Service
    split_tag = tag_name.split(".")
    # If the full Site.Service.Tag was not supplied, return the tag_name
    if len(split_tag) < 3:
        warnings.warn("WARNING- Please supply the full Site.Service.Tag.")
        return tag_name
    # The Site.Service will be the first two split strings
    site_service = split_tag[0] + "." + split_tag[1]
    # GetPoints will return a DataFrame with point information; this pulls
    # the whole service's point list just to read one description.
    points = GetPoints(site_service)
    if tag_name in points.Tag.values:
        description = points[points.Tag == tag_name].Description.values[0]
        if description:
            return description
        else:
            return tag_name
    else:
        warnings.warn("WARNING- " + tag_name + " not found in service.")
        return None
def HistAppendValues(site_service, tag_name, times, values, statuses):
    """
    Appends a value to an eDNA history service. Take very careful note of the
    following required parameters. Any deviation from this exact format WILL
    cause the function to fail.

    This function will append values to history, only if they are LATER than
    the current time of the last written data point. If this is not true, no
    data will be appended.

    This value is strongly preferred over HistUpdateInsertValues, which will
    slow down data retrieval if it is used too often.

    :param site_service: This is the history service for the eDNA tag, NOT
        the site.service of the tag itself. For instance,
        ANTARES.HISTORY, not ANTARES.ANVCALC
    :param tag_name: This is the full site.service.tag. For instance,
        ANTARES.ANVCALC.ADE1CA02
    :param times: This is a Python array of times in UTC Epoch format.
        For example, "1483926416" not "2016/01/01 01:01:01".
        This must be an array.
    :param values: A Python array of data point values for each times.
    :param statuses: The status of the point. Refer to eDNA documentation
        for more information. Usually use '3', which is 'OK'.
    """
    # Define all required variables in the correct ctypes format
    szService = c_char_p(site_service.encode('utf-8'))
    szPoint = c_char_p(tag_name.encode('utf-8'))
    nCount = c_ushort(1)
    # Iterate over each user-supplied data point; one dll call is made per
    # point (nCount is fixed at 1).
    for dttime, value, status in zip(times, values, statuses):
        # Define all required variables in the correct ctypes format
        PtTimeList = c_long(dttime)
        PusStatusList = c_ushort(status)
        PszValueList = c_char_p(str(value).encode('utf-8'))
        szError = create_string_buffer(20)
        nError = c_ushort(20)
        # Call the history append file.
        # NOTE(review): nRet and szError are not checked, so per-point
        # failures are silent - confirm whether that is intended.
        nRet = dna_dll.DnaHistAppendValues(szService, szPoint,
            nCount, byref(PtTimeList), byref(PusStatusList),
            byref(PszValueList), byref(szError), nError)
def HistUpdateInsertValues(site_service, tag_name, times, values, statuses):
    """
    CAUTION- Use HistAppendValues instead of this function, unless you know
    what you are doing.

    Inserts a value to an eDNA history service. Take very careful note of the
    following required parameters. Any deviation from this exact format WILL
    cause the function to fail.

    :param site_service: This is the history service for the eDNA tag, NOT
                         the site.service of the tag itself. For instance,
                         ANTARES.HISTORY, not ANTARES.ANVCALC
    :param tag_name: This is the full site.service.tag. For instance,
                     ANTARES.ANVCALC.ADE1CA02
    :param times: This is a Python array of times in UTC Epoch format.
                  For example, "1483926416" not "2016/01/01 01:01:01".
                  This must be an array.
    :param values: A Python array of data point values for each times.
    :param statuses: The status of the point. Refer to eDNA documentation
                     for more information. Usually use '3', which is 'OK'.
    """
    # Define all required variables in the correct ctypes format
    szService = c_char_p(site_service.encode('utf-8'))
    szPoint = c_char_p(tag_name.encode('utf-8'))
    # The API is called once per point, so the per-call count is always 1
    nCount = c_ushort(1)
    # Iterate over each user-supplied data point
    for dttime, value, status in zip(times, values, statuses):
        # Define all required variables in the correct ctypes format
        PtTimeList = c_long(dttime)
        PusStatusList = c_ushort(status)
        PszValueList = c_char_p(str(value).encode('utf-8'))
        szError = create_string_buffer(20)
        nError = c_ushort(20)
        # Call the history update/insert function
        nRet = dna_dll.DnaHistUpdateInsertValues(szService, szPoint,
            nCount, byref(PtTimeList), byref(PusStatusList),
            byref(PszValueList), byref(szError), nError)
        # BUG FIX: the return code and error buffer were previously
        # discarded, so a failed insert was silent data loss. Warn the
        # caller instead (matches the warning style used by GetRTFull).
        if nRet != 0:
            warnings.warn("WARNING- eDNA history insert failed with code " +
                          str(nRet) + " for value at time " + str(dttime) +
                          ": " +
                          szError.value.decode('utf-8', errors='ignore'))
def SelectPoint():
    """
    Opens an eDNA point picker, where the user can select a single tag.

    :return: selected tag name
    """
    # Allocate a C buffer that the picker dialog fills with the chosen tag
    buffer_size = 20
    pszPoint = create_string_buffer(buffer_size)
    nPoint = c_ushort(buffer_size)
    # Launch the modal point picker; the selection lands in pszPoint
    dna_dll.DnaSelectPoint(byref(pszPoint), nPoint)
    # Convert the raw C bytes back into a Python string
    return pszPoint.value.decode('utf-8')
def StringToUTCTime(time_string):
    """
    Turns a DateTime string into UTC time.

    :param time_string: Must be the format "MM/dd/yy hh:mm:ss"
    :return: an integer representing the UTC int format
    """
    # The dll expects a C string, so encode the Python string first
    encoded_time = time_string.encode('utf-8')
    return dna_dll.StringToUTCTime(c_char_p(encoded_time))
# At the end of the module, we need to check that at least one eDNA service
# is connected. Otherwise, there is a problem with the eDNA connection.
service_array = GetServices()
num_services = 0
if not service_array.empty:
    num_services = str(len(service_array))
    # The print is kept inside this branch: when no services are connected,
    # num_services remains the integer 0 and the "str + int" concatenation
    # below would raise a TypeError at import time.
    print("Successfully connected to " + num_services + " eDNA services.")
# Cleanup the unnecessary variables
del(service_array, num_services, default_location)
|
drericstrong/pyedna | pyedna/ezdna.py | GetMultipleTags | python | def GetMultipleTags(tag_list, start_time, end_time, sampling_rate=None,
fill_limit=99999, verify_time=False, desc_as_label=False,
utc=False):
"""
Retrieves raw data from eDNA history for multiple tags, merging them into
a single DataFrame, and resampling the data according to the specified
sampling_rate.
:param tag_list: a list of fully-qualified (site.service.tag) eDNA tags
:param start_time: must be in format mm/dd/yy hh:mm:ss
:param end_time: must be in format mm/dd/yy hh:mm:ss
:param sampling_rate: in units of seconds
:param fill_limit: in units of data points
:param verify_time: verify that the time is not before or after the query
:param desc_as_label: use the tag description as the column name instead
of the full tag
:param utc: if True, use the integer time format instead of DateTime
:return: a pandas DataFrame with timestamp and values
"""
# Since we are pulling data from multiple tags, let's iterate over each
# one. For this case, we only want to pull data using the "raw" method,
# which will obtain all data as it is actually stored in the historian.
dfs = []
columns_names = []
for tag in tag_list:
df = GetHist(tag, start_time, end_time, utc=utc)
if not df.empty:
# Sometimes a duplicate index/value pair is retrieved from
# eDNA, which will cause the concat to fail if not removed
# df.drop_duplicates(inplace=True)
df = df[~df.index.duplicated(keep='first')]
# If the user wants to use descriptions as labels, we need to
# ensure that only unique labels are used
label = tag
if desc_as_label:
orig_label = _GetLabel(tag)
label = orig_label
rename_number = 2
while label in columns_names:
label = orig_label + str(rename_number)
rename_number += 1
columns_names.append(label)
df.rename(columns={tag: label}, inplace=True)
# Add the DataFrame to the list, to be concatenated later
dfs.append(pd.DataFrame(df[label]))
# Next, we concatenate all the DataFrames using an outer join (default).
# Verify integrity is slow, but it ensures that the concatenation
# worked correctly.
if dfs:
merged_df = pd.concat(dfs, axis=1, verify_integrity=True)
merged_df = merged_df.fillna(method="ffill", limit=fill_limit)
else:
warnings.warn('WARNING- No data retrieved for any tags. ' +
'Check eDNA connection, ensure that the start time is ' +
'not later than the end time, verify that the ' +
'DateTime formatting matches eDNA requirements, and ' +
'check that data exists in the query time period.')
return pd.DataFrame()
# eDNA sometimes pulls data too early or too far- let's filter out all
# the data that is not within our original criteria.
if verify_time:
start_np = pd.to_datetime(start_time)
end_np = pd.to_datetime(end_time)
mask = (merged_df.index > start_np) & (merged_df.index <= end_np)
merged_df = merged_df.loc[mask]
# Finally, we resample the data at the rate requested by the user.
if sampling_rate:
sampling_string = str(sampling_rate) + "S"
merged_df = merged_df.resample(sampling_string).fillna(
method="ffill", limit=fill_limit)
return merged_df | Retrieves raw data from eDNA history for multiple tags, merging them into
a single DataFrame, and resampling the data according to the specified
sampling_rate.
:param tag_list: a list of fully-qualified (site.service.tag) eDNA tags
:param start_time: must be in format mm/dd/yy hh:mm:ss
:param end_time: must be in format mm/dd/yy hh:mm:ss
:param sampling_rate: in units of seconds
:param fill_limit: in units of data points
:param verify_time: verify that the time is not before or after the query
:param desc_as_label: use the tag description as the column name instead
of the full tag
:param utc: if True, use the integer time format instead of DateTime
:return: a pandas DataFrame with timestamp and values | train | https://github.com/drericstrong/pyedna/blob/b8f8f52def4f26bb4f3a993ce3400769518385f6/pyedna/ezdna.py#L370-L443 | [
"def GetHist(tag_name, start_time, end_time, period=5, mode=\"raw\",\n desc_as_label=False, label=None, high_speed=False, utc=False):\n \"\"\"\n Retrieves data from eDNA history for a given tag.\n\n :param tag_name: fully-qualified (site.service.tag) eDNA tag\n :param start_time: must be in format mm/dd/yy hh:mm:ss\n :param end_time: must be in format mm/dd/yy hh:mm:ss\n :param period: specify the number of seconds for the pull interval\n :param mode: \"raw\", \"snap\", \"avg\", \"interp\", \"max\", \"min\"\n See eDNA documentation for more information.\n :param desc_as_label: use the tag description as the column name instead\n of the full tag\n :param label: supply a custom label to use as the DataFrame column name\n :param high_speed: if True, pull millisecond data\n :param utc: if True, use the integer time format instead of DateTime\n :return: a pandas DataFrame with timestamp, value, and status\n \"\"\"\n # Check if the point even exists\n if not DoesIDExist(tag_name):\n warnings.warn(\"WARNING- \" + tag_name + \" does not exist or \" +\n \"connection was dropped. 
Try again if tag does exist.\")\n return pd.DataFrame()\n\n # Define all required variables in the correct ctypes format\n szPoint = c_char_p(tag_name.encode('utf-8'))\n tStart = c_long(StringToUTCTime(start_time))\n tEnd = c_long(StringToUTCTime(end_time))\n tPeriod = c_long(period)\n pulKey = c_ulong(0)\n\n # Initialize the data pull using the specified pulKey, which is an\n # identifier that tells eDNA which data pull is occurring\n mode = mode.lower().strip()\n if not high_speed:\n if mode == \"avg\":\n nRet = dna_dll.DnaGetHistAvgUTC(szPoint, tStart, tEnd, tPeriod, byref(pulKey))\n if mode == \"interp\":\n nRet = dna_dll.DnaGetHistInterpUTC(szPoint, tStart, tEnd, tPeriod, byref(pulKey))\n if mode == \"min\":\n nRet = dna_dll.DnaGetHistMinUTC(szPoint, tStart, tEnd, tPeriod, byref(pulKey))\n if mode == \"max\":\n nRet = dna_dll.DnaGetHistMaxUTC(szPoint, tStart, tEnd, tPeriod, byref(pulKey))\n if mode == \"snap\":\n nRet = dna_dll.DnaGetHistSnapUTC(szPoint, tStart, tEnd, tPeriod, byref(pulKey))\n else:\n nRet = dna_dll.DnaGetHistRawUTC(szPoint, tStart, tEnd, byref(pulKey))\n time_, val, stat = _GetNextHistSmallUTC(pulKey, nRet)\n else:\n nStartMillis = c_ushort(0)\n nEndMillis = c_ushort(0)\n nRet = dna_dll.DnaGetHSHistRawUTC(szPoint, tStart, nStartMillis,\n tEnd, nEndMillis, byref(pulKey))\n time_, val, stat = _GetNextHSHistUTC(pulKey, nRet)\n\n # The history request must be cancelled to free up network resources\n dna_dll.DnaCancelHistRequest(pulKey)\n\n # To construct the pandas DataFrame, the tag name will be used as the\n # column name, and the index (which is in the strange eDNA format) must be\n # converted to an actual DateTime\n d = {tag_name + ' Status': stat, tag_name: val}\n df = pd.DataFrame(data=d, index=time_)\n if not utc:\n if not high_speed:\n df.index = pd.to_datetime(df.index, unit=\"s\")\n else:\n df.index = pd.to_datetime(df.index, unit=\"ms\")\n if df.empty:\n warnings.warn('WARNING- No data retrieved for ' + tag_name + '. 
' +\n 'Check eDNA connection, ensure that the start time is ' +\n 'not later than the end time, verify that the ' +\n 'DateTime formatting matches eDNA requirements, and ' +\n 'check that data exists in the query time period.')\n\n # Check if the user would rather use the description as the column name\n if desc_as_label or label:\n if label:\n new_label = label\n else:\n new_label = _GetLabel(tag_name)\n df.rename(inplace=True, columns={tag_name: new_label,\n tag_name + \" Status\": new_label + \" Status\"})\n return df\n",
"def _GetLabel(tag_name):\n # This function tries to get the tag description to use as the label for\n # the variable in the pandas DataFrame. It removes any special characters\n # and trims whitespace before and after. If the label is blank, the\n # tag name will be returned again instead.\n label = GetTagDescription(tag_name)\n if label:\n return label\n else:\n return tag_name\n"
] | # -*- coding: utf-8 -*-
"""
pyedna.ezdna
--------------
This module contains "easy" versions of common functions from the eDNA
C++ dll. Obtain a legal copy of the C++ eDNA dll for use.
:copyright: (c) 2017 Eric Strong.
:license: Refer to LICENSE.txt for more information.
"""
# Note- all functions are in CamelCase to match the original eDNA function
# names, even though this does not follow PEP 8.
import re
import os
import numba
import warnings
import numpy as np
import pandas as pd
from unittest.mock import Mock
from ctypes import cdll, byref, create_string_buffer
from ctypes import c_char_p, c_double, c_ushort, c_long, c_ulong
def _mock_edna():
# This function will mock all the methods that were used in the dna_dll.
# It's necessary so that documentation can be automatically created.
dna_dll = Mock()
attrs = {'DnaGetHistAvgUTC.return_value': c_ulong(1),
'DnaGetHistInterpUTC.return_value': c_ulong(1),
'DnaGetHistMinUTC.return_value': c_ulong(1),
'DnaGetHistMaxUTC.return_value': c_ulong(1),
'DnaGetHistSnapUTC.return_value': c_ulong(1),
'DnaGetHistRawUTC.return_value': c_ulong(1),
'DoesIdExist.return_value': c_ulong(1),
'DnaGetHSHistRawUTC.return_value': c_ulong(1),
'DnaGetNextHSHistUTC.return_value': c_ulong(1),
'DnaGetPointEntry.return_value': c_ulong(1),
'DnaGetNextPointEntry.return_value': c_ulong(1),
'DNAGetRTFull.return_value': c_ulong(1),
'DnaSelectPoint.return_value': c_ulong(1),
'StringToUTCTime.return_value': 1,
'DnaGetServiceEntry.return_value': c_ulong(1),
'DnaGetNextServiceEntry.return_value': c_ulong(1),
'DnaHistAppendValues.return_value': c_ulong(1),
'DnaHistUpdateInsertValues.return_value': c_ulong(1),
'DnaCancelHistRequest.return_value': None,
'DnaGetNextHistSmallUTC.return_value': c_ulong(1)}
dna_dll.configure_mock(**attrs)
return dna_dll
# This code should execute at the beginning of the module import, because
# all of the functions in this module require the dna_dll library to be
# loaded. See "LoadDll" if not in default location
default_location = "C:\\Program Files (x86)\\eDNA\\EzDnaApi64.dll"
if os.path.isfile(default_location):
dna_dll = cdll.LoadLibrary(default_location)
else:
warnings.warn("ERROR- no eDNA dll detected at " +
"C:\\Program Files (x86)\\eDNA\\EzDnaApi64.dll" +
" . Please manually load dll using the LoadDll function. " +
"Mocking dll, but all functions will fail until " +
"dll is manually loaded...")
dna_dll = _mock_edna()
# If the EzDnaApi file is not in the default location, the user must explicitly
# load it using the LoadDll function.
def LoadDll(location):
"""
If the EzDnaApi64.dll file is not in the default location
(C:\Program Files (x86)\eDNA\EzDnaApi64.dll) then the user must specify
the correct location of the file, before this module can be used.
:param location: the full location of EzDnaApi64.dll, including filename
"""
if os.path.isfile(location):
global dna_dll
dna_dll = cdll.LoadLibrary(location)
else:
raise Exception("ERROR- file does not exist at " + location)
def _format_str(text):
# Only allows a-z, 0-9, ., _, :, /, -, and spaces
if type(text) is str:
formatted_text = re.sub('[^-._:/\sA-Za-z0-9]+', '', text).strip()
return formatted_text
else:
return text
def DoesIDExist(tag_name):
"""
Determines if a fully-qualified site.service.tag eDNA tag exists
in any of the connected services.
:param tag_name: fully-qualified (site.service.tag) eDNA tag
:return: true if the point exists, false if the point does not exist
Example:
>>> DoesIDExist("Site.Service.Tag")
"""
# the eDNA API requires that the tag_name be specified in a binary format,
# and the ctypes library must be used to create a C++ variable type.
szPoint = c_char_p(tag_name.encode('utf-8'))
result = bool(dna_dll.DoesIdExist(szPoint))
return result
def GetHistAvg(tag_name, start_time, end_time, period,
desc_as_label=False, label=None):
"""
Retrieves data from eDNA history for a given tag. The data will be
averaged over the specified "period".
:param tag_name: fully-qualified (site.service.tag) eDNA tag
:param start_time: must be in format mm/dd/yy hh:mm:ss
:param end_time: must be in format mm/dd/yy hh:mm:ss
:param period: in units of seconds (e.g. 10)
:param desc_as_label: use the tag description as the column name instead
of the full tag
:param label: supply a custom label to use as the DataFrame column name
:return: a pandas DataFrame with timestamp, value, and status
"""
return GetHist(tag_name, start_time, end_time, mode="avg", period=period,
desc_as_label=desc_as_label, label=label)
def GetHistInterp(tag_name, start_time, end_time, period,
desc_as_label=False, label=None):
"""
Retrieves data from eDNA history for a given tag. The data will be
linearly interpolated over the specified "period".
:param tag_name: fully-qualified (site.service.tag) eDNA tag
:param start_time: must be in format mm/dd/yy hh:mm:ss
:param end_time: must be in format mm/dd/yy hh:mm:ss
:param period: in units of seconds (e.g. 10)
:param desc_as_label: use the tag description as the column name instead
of the full tag
:param label: supply a custom label to use as the DataFrame column name
:return: a pandas DataFrame with timestamp, value, and status
"""
return GetHist(tag_name, start_time, end_time, mode="interp",
period=period, desc_as_label=desc_as_label, label=label)
def GetHistMax(tag_name, start_time, end_time, period,
desc_as_label=False, label=None):
"""
Retrieves data from eDNA history for a given tag. The maximum of the data
will be found over the specified "period".
:param tag_name: fully-qualified (site.service.tag) eDNA tag
:param start_time: must be in format mm/dd/yy hh:mm:ss
:param end_time: must be in format mm/dd/yy hh:mm:ss
:param period: in units of seconds (e.g. 10)
:param desc_as_label: use the tag description as the column name instead
of the full tag
:param label: supply a custom label to use as the DataFrame column name
:return: a pandas DataFrame with timestamp, value, and status
"""
return GetHist(tag_name, start_time, end_time, mode="max",
period=period, desc_as_label=desc_as_label, label=label)
def GetHistMin(tag_name, start_time, end_time, period,
desc_as_label=False, label=None):
"""
Retrieves data from eDNA history for a given tag. The minimum of the data
will be found over the specified "period".
:param tag_name: fully-qualified (site.service.tag) eDNA tag
:param start_time: must be in format mm/dd/yy hh:mm:ss
:param end_time: must be in format mm/dd/yy hh:mm:ss
:param period: in units of seconds (e.g. 10)
:param desc_as_label: use the tag description as the column name instead
of the full tag
:param label: supply a custom label to use as the DataFrame column name
:return: a pandas DataFrame with timestamp, value, and status
"""
return GetHist(tag_name, start_time, end_time, mode="min",
period=period, desc_as_label=desc_as_label, label=label)
def GetHistRaw(tag_name, start_time, end_time, high_speed=False,
desc_as_label=False, label=None):
"""
Retrieves raw data from eDNA history for a given tag.
:param tag_name: fully-qualified (site.service.tag) eDNA tag
:param start_time: must be in format mm/dd/yy hh:mm:ss
:param end_time: must be in format mm/dd/yy hh:mm:ss
:param high_speed: true = pull milliseconds
:param desc_as_label: use the tag description as the column name instead
of the full tag
:param label: supply a custom label to use as the DataFrame column name
:return: a pandas DataFrame with timestamp, value, and status
"""
return GetHist(tag_name, start_time, end_time, mode="raw",
desc_as_label=desc_as_label, label=label)
def GetHistSnap(tag_name, start_time, end_time, period,
desc_as_label=False, label=None):
"""
Retrieves data from eDNA history for a given tag. The data will be
snapped to the last known value over intervals of the specified "period".
:param tag_name: fully-qualified (site.service.tag) eDNA tag
:param start_time: must be in format mm/dd/yy hh:mm:ss
:param end_time: must be in format mm/dd/yy hh:mm:ss
:param period: in units of seconds (e.g. 10)
:param desc_as_label: use the tag description as the column name instead
of the full tag
:param label: supply a custom label to use as the DataFrame column name
:return: a pandas DataFrame with timestamp, value, and status
"""
return GetHist(tag_name, start_time, end_time, mode="snap",
period=period, desc_as_label=desc_as_label, label=label)
def GetHist(tag_name, start_time, end_time, period=5, mode="raw",
desc_as_label=False, label=None, high_speed=False, utc=False):
"""
Retrieves data from eDNA history for a given tag.
:param tag_name: fully-qualified (site.service.tag) eDNA tag
:param start_time: must be in format mm/dd/yy hh:mm:ss
:param end_time: must be in format mm/dd/yy hh:mm:ss
:param period: specify the number of seconds for the pull interval
:param mode: "raw", "snap", "avg", "interp", "max", "min"
See eDNA documentation for more information.
:param desc_as_label: use the tag description as the column name instead
of the full tag
:param label: supply a custom label to use as the DataFrame column name
:param high_speed: if True, pull millisecond data
:param utc: if True, use the integer time format instead of DateTime
:return: a pandas DataFrame with timestamp, value, and status
"""
# Check if the point even exists
if not DoesIDExist(tag_name):
warnings.warn("WARNING- " + tag_name + " does not exist or " +
"connection was dropped. Try again if tag does exist.")
return pd.DataFrame()
# Define all required variables in the correct ctypes format
szPoint = c_char_p(tag_name.encode('utf-8'))
tStart = c_long(StringToUTCTime(start_time))
tEnd = c_long(StringToUTCTime(end_time))
tPeriod = c_long(period)
pulKey = c_ulong(0)
# Initialize the data pull using the specified pulKey, which is an
# identifier that tells eDNA which data pull is occurring
mode = mode.lower().strip()
if not high_speed:
if mode == "avg":
nRet = dna_dll.DnaGetHistAvgUTC(szPoint, tStart, tEnd, tPeriod, byref(pulKey))
if mode == "interp":
nRet = dna_dll.DnaGetHistInterpUTC(szPoint, tStart, tEnd, tPeriod, byref(pulKey))
if mode == "min":
nRet = dna_dll.DnaGetHistMinUTC(szPoint, tStart, tEnd, tPeriod, byref(pulKey))
if mode == "max":
nRet = dna_dll.DnaGetHistMaxUTC(szPoint, tStart, tEnd, tPeriod, byref(pulKey))
if mode == "snap":
nRet = dna_dll.DnaGetHistSnapUTC(szPoint, tStart, tEnd, tPeriod, byref(pulKey))
else:
nRet = dna_dll.DnaGetHistRawUTC(szPoint, tStart, tEnd, byref(pulKey))
time_, val, stat = _GetNextHistSmallUTC(pulKey, nRet)
else:
nStartMillis = c_ushort(0)
nEndMillis = c_ushort(0)
nRet = dna_dll.DnaGetHSHistRawUTC(szPoint, tStart, nStartMillis,
tEnd, nEndMillis, byref(pulKey))
time_, val, stat = _GetNextHSHistUTC(pulKey, nRet)
# The history request must be cancelled to free up network resources
dna_dll.DnaCancelHistRequest(pulKey)
# To construct the pandas DataFrame, the tag name will be used as the
# column name, and the index (which is in the strange eDNA format) must be
# converted to an actual DateTime
d = {tag_name + ' Status': stat, tag_name: val}
df = pd.DataFrame(data=d, index=time_)
if not utc:
if not high_speed:
df.index = pd.to_datetime(df.index, unit="s")
else:
df.index = pd.to_datetime(df.index, unit="ms")
if df.empty:
warnings.warn('WARNING- No data retrieved for ' + tag_name + '. ' +
'Check eDNA connection, ensure that the start time is ' +
'not later than the end time, verify that the ' +
'DateTime formatting matches eDNA requirements, and ' +
'check that data exists in the query time period.')
# Check if the user would rather use the description as the column name
if desc_as_label or label:
if label:
new_label = label
else:
new_label = _GetLabel(tag_name)
df.rename(inplace=True, columns={tag_name: new_label,
tag_name + " Status": new_label + " Status"})
return df
@numba.jit
def _GetNextHistSmallUTC(pulKey, nRet):
# This is a base function that iterates over a predefined history call,
# which may be raw, snap, max, min, etc.
pdValue, ptTime, pusStatus = c_double(-9999), c_long(-9999), c_ushort(0)
refVal, refTime, refStat = byref(pdValue), byref(ptTime), byref(pusStatus)
val = np.empty(0)
time_ = np.empty(0)
stat = np.empty(0)
# Once nRet is not zero, the function was terminated, either due to an
# error or due to the end of the data period.
nRet = dna_dll.DnaGetNextHistSmallUTC(pulKey, refVal, refTime, refStat)
while nRet == 0:
val = np.append(val, pdValue.value)
time_ = np.append(time_, ptTime.value)
stat = np.append(stat, pusStatus.value)
nRet = dna_dll.DnaGetNextHistSmallUTC(pulKey, refVal, refTime, refStat)
return time_, val, stat
@numba.jit
def _GetNextHSHistUTC(pulKey, nRet):
# This is a base function that iterates over a predefined history call,
# which may be raw, snap, max, min, etc.
pdValue, ptTime, pnMillis = c_double(-9999), c_long(-9999), c_ushort(0)
szStatus, nStatus = create_string_buffer(20), c_ushort(20)
refVal, refTime, refMillis = byref(pdValue), byref(ptTime), byref(pnMillis)
refStatus = byref(szStatus)
val = np.empty(0)
time_ = np.empty(0)
stat = np.empty(0)
# Once nRet is not zero, the function was terminated, either due to an
# error or due to the end of the data period.
nRet = dna_dll.DnaGetNextHSHistUTC(pulKey, refVal, refTime, refMillis,
refStatus, nStatus)
while nRet == 0:
val = np.append(val, pdValue.value)
time_ = np.append(time_, ptTime.value * 1000 + pnMillis.value)
stat = np.append(stat, 3)
nRet = dna_dll.DnaGetNextHSHistUTC(pulKey, refVal, refTime, refMillis,
refStatus, nStatus)
return time_, val, stat
def _GetLabel(tag_name):
# This function tries to get the tag description to use as the label for
# the variable in the pandas DataFrame. It removes any special characters
# and trims whitespace before and after. If the label is blank, the
# tag name will be returned again instead.
label = GetTagDescription(tag_name)
if label:
return label
else:
return tag_name
def GetMultipleTags(tag_list, start_time, end_time, sampling_rate=None,
fill_limit=99999, verify_time=False, desc_as_label=False,
utc=False):
"""
Retrieves raw data from eDNA history for multiple tags, merging them into
a single DataFrame, and resampling the data according to the specified
sampling_rate.
:param tag_list: a list of fully-qualified (site.service.tag) eDNA tags
:param start_time: must be in format mm/dd/yy hh:mm:ss
:param end_time: must be in format mm/dd/yy hh:mm:ss
:param sampling_rate: in units of seconds
:param fill_limit: in units of data points
:param verify_time: verify that the time is not before or after the query
:param desc_as_label: use the tag description as the column name instead
of the full tag
:param utc: if True, use the integer time format instead of DateTime
:return: a pandas DataFrame with timestamp and values
"""
# Since we are pulling data from multiple tags, let's iterate over each
# one. For this case, we only want to pull data using the "raw" method,
# which will obtain all data as it is actually stored in the historian.
dfs = []
columns_names = []
for tag in tag_list:
df = GetHist(tag, start_time, end_time, utc=utc)
if not df.empty:
# Sometimes a duplicate index/value pair is retrieved from
# eDNA, which will cause the concat to fail if not removed
# df.drop_duplicates(inplace=True)
df = df[~df.index.duplicated(keep='first')]
# If the user wants to use descriptions as labels, we need to
# ensure that only unique labels are used
label = tag
if desc_as_label:
orig_label = _GetLabel(tag)
label = orig_label
rename_number = 2
while label in columns_names:
label = orig_label + str(rename_number)
rename_number += 1
columns_names.append(label)
df.rename(columns={tag: label}, inplace=True)
# Add the DataFrame to the list, to be concatenated later
dfs.append(pd.DataFrame(df[label]))
# Next, we concatenate all the DataFrames using an outer join (default).
# Verify integrity is slow, but it ensures that the concatenation
# worked correctly.
if dfs:
merged_df = pd.concat(dfs, axis=1, verify_integrity=True)
merged_df = merged_df.fillna(method="ffill", limit=fill_limit)
else:
warnings.warn('WARNING- No data retrieved for any tags. ' +
'Check eDNA connection, ensure that the start time is ' +
'not later than the end time, verify that the ' +
'DateTime formatting matches eDNA requirements, and ' +
'check that data exists in the query time period.')
return pd.DataFrame()
# eDNA sometimes pulls data too early or too far- let's filter out all
# the data that is not within our original criteria.
if verify_time:
start_np = pd.to_datetime(start_time)
end_np = pd.to_datetime(end_time)
mask = (merged_df.index > start_np) & (merged_df.index <= end_np)
merged_df = merged_df.loc[mask]
# Finally, we resample the data at the rate requested by the user.
if sampling_rate:
sampling_string = str(sampling_rate) + "S"
merged_df = merged_df.resample(sampling_string).fillna(
method="ffill", limit=fill_limit)
return merged_df
def _FormatPoints(szPoint, pdValue, szTime, szStatus, szDesc, szUnits):
# Returns an array of properly-formatted points from the GetPoints function
tag = _format_str(szPoint.value.decode(errors='ignore'))
value = pdValue.value
time_ = _format_str(szTime.value.decode(errors='ignore'))
status = _format_str(szStatus.value.decode(errors='ignore'))
desc = _format_str(szDesc.value.decode(errors='ignore'))
units = _format_str(szUnits.value.decode(errors='ignore'))
if szPoint.value.strip():
return [tag, value, time_, status, desc, units]
def GetPoints(edna_service):
"""
Obtains all the points in the edna_service, including real-time values.
:param edna_service: The full Site.Service name of the eDNA service.
:return: A pandas DataFrame of points in the form [Tag, Value, Time,
Description, Units]
"""
# Define all required variables in the correct ctypes format
szServiceName = c_char_p(edna_service.encode('utf-8'))
nStarting, pulKey, pdValue = c_ushort(0), c_ulong(0), c_double(-9999)
szPoint, szTime = create_string_buffer(30), create_string_buffer(30)
szStatus, szDesc = create_string_buffer(20), create_string_buffer(90)
szUnits = create_string_buffer(20)
szPoint2, szTime2 = create_string_buffer(30), create_string_buffer(30)
szStatus2, szDesc2 = create_string_buffer(20), create_string_buffer(90)
szUnits2, pdValue2 = create_string_buffer(20), c_double(-9999)
nPoint, nTime, nStatus = c_ushort(30), c_ushort(30), c_ushort(20)
nDesc, nUnits = c_ushort(90), c_ushort(20)
# Call the eDNA function. nRet is zero if the function is successful.
points = []
nRet = dna_dll.DnaGetPointEntry(szServiceName, nStarting, byref(pulKey),
byref(szPoint), nPoint, byref(pdValue), byref(szTime), nTime,
byref(szStatus), nStatus, byref(szDesc), nDesc, byref(szUnits), nUnits)
tag = _FormatPoints(szPoint, pdValue, szTime, szStatus, szDesc, szUnits)
if tag:
points.append(tag)
# Iterate across all the returned services
while nRet == 0:
nRet = dna_dll.DnaGetNextPointEntry(pulKey, byref(szPoint2), nPoint,
byref(pdValue2), byref(szTime2), nTime, byref(szStatus2), nStatus,
byref(szDesc2), nDesc, byref(szUnits2), nUnits)
# We want to ensure only UTF-8 characters are returned. Ignoring
# characters is slightly unsafe, but they should only occur in the
# units or description, so it's not a huge issue.
tag = _FormatPoints(szPoint2, pdValue2, szTime2, szStatus2, szDesc2,
szUnits2)
if tag:
points.append(tag)
# If no results were returned, raise a warning
df = pd.DataFrame()
if points:
df = pd.DataFrame(points, columns=["Tag", "Value", "Time", "Status",
"Description", "Units"])
else:
warnings.warn("WARNING- No points were returned. Check that the " +
"service exists and contains points.")
return df
def GetRTFull(tag_name):
"""
Gets current information about a point configured in a real-time
eDNA service, including current value, time, status, description,
and units.
:param tag_name: fully-qualified (site.service.tag) eDNA tag
:return: tuple of: alue, time, status, statusint, description, units
"""
# Check if the point even exists
if not DoesIDExist(tag_name):
warnings.warn("WARNING- " + tag_name + " does not exist or " +
"connection was dropped. Try again if tag does exist.")
return None
# Define all required variables in the correct ctypes format
szPoint = c_char_p(tag_name.encode('utf-8'))
pdValue, ptTime = c_double(-9999), c_long(-9999)
szValue, szTime = create_string_buffer(20), create_string_buffer(20)
szStatus, szDesc = create_string_buffer(20), create_string_buffer(20)
szUnits = create_string_buffer(20)
nValue, nTime, nStatus = c_ushort(20), c_ushort(20), c_ushort(20)
pusStatus, nDesc, nUnits = c_ushort(0), c_ushort(0), c_ushort(0)
# Call the eDNA function. nRet is zero if the function is successful
nRet = dna_dll.DNAGetRTFull(szPoint, byref(pdValue), byref(szValue),
nValue, byref(ptTime), byref(szTime), nTime, byref(pusStatus),
byref(szStatus), nStatus, byref(szDesc), nDesc, byref(szUnits), nUnits)
# Check to make sure the function returned correctly. If not, return None
if nRet == 0:
return ([pdValue.value, szTime.value.decode('utf-8'),
szStatus.value.decode('utf-8'), pusStatus.value,
szDesc.value.decode('utf-8'), szUnits.value.decode('utf-8')])
else:
warnings.warn("WARNING- eDNA API failed with code " + str(nRet))
return None
def _FormatServices(szSvcName, szSvcDesc, szSvcType, szSvcStat):
# Returns an array of properly-formatted services from the
# GetServices function
name = _format_str(szSvcName.value.decode(errors='ignore'))
desc = _format_str(szSvcDesc.value.decode(errors='ignore'))
type_ = _format_str(szSvcType.value.decode(errors='ignore'))
status = _format_str(szSvcStat.value.decode(errors='ignore'))
if name:
return [name, desc, type_, status]
def GetServices():
    """
    Obtains all the connected eDNA services.

    :return: A pandas DataFrame of connected eDNA services in the form [Name,
        Description, Type, Status]; empty DataFrame (with a warning) when no
        services are connected
    """
    # Define all required variables in the correct ctypes format.
    # pulKey is an opaque handle that identifies this enumeration to the API.
    pulKey = c_ulong(0)
    # Empty type / start-name filters request every service entry
    szType = c_char_p("".encode('utf-8'))
    szStartSvcName = c_char_p("".encode('utf-8'))
    # Two sets of output buffers: the first set is used for the initial
    # DnaGetServiceEntry call, the second set is reused for every
    # DnaGetNextServiceEntry call in the loop below
    szSvcName, szSvcDesc = create_string_buffer(30), create_string_buffer(90)
    szSvcType, szSvcStat = create_string_buffer(30), create_string_buffer(30)
    szSvcName2, szSvcDesc2 = create_string_buffer(30), create_string_buffer(90)
    szSvcType2, szSvcStat2 = create_string_buffer(30), create_string_buffer(30)
    # Buffer capacities, passed to the API alongside each buffer
    nSvcName, nSvcDesc = c_ushort(30), c_ushort(90)
    nSvcType, nSvcStat = c_ushort(30), c_ushort(30)
    # Call the eDNA function. nRet is zero if the function is successful.
    services = []
    nRet = dna_dll.DnaGetServiceEntry(szType, szStartSvcName, byref(pulKey),
        byref(szSvcName), nSvcName, byref(szSvcDesc), nSvcDesc,
        byref(szSvcType), nSvcType, byref(szSvcStat), nSvcStat)
    # _FormatServices returns None for a blank entry, which is skipped
    serv = _FormatServices(szSvcName, szSvcDesc, szSvcType, szSvcStat)
    if serv:
        services.append(serv)
    # Iterate across all the returned services; a nonzero nRet ends the loop
    while nRet == 0:
        nRet = dna_dll.DnaGetNextServiceEntry(pulKey,
            byref(szSvcName2), nSvcName, byref(szSvcDesc2), nSvcDesc,
            byref(szSvcType2), nSvcType, byref(szSvcStat2), nSvcStat)
        # We want to ensure only UTF-8 characters are returned. Ignoring
        # characters is slightly unsafe, but they should only occur in the
        # units or description, so it's not a huge issue.
        serv = _FormatServices(szSvcName2, szSvcDesc2, szSvcType2, szSvcStat2)
        if serv:
            services.append(serv)
    # If no results were returned, raise a warning
    df = pd.DataFrame()
    if services:
        df = pd.DataFrame(services, columns=["Name", "Description", "Type",
                                             "Status"])
    else:
        warnings.warn("WARNING- No connected eDNA services detected. Check " +
                      "your DNASys.ini file and your network connection.")
    return df
def GetTagDescription(tag_name):
    """
    Gets the current description of a point configured in a real-time eDNA
    service.

    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :return: tag description; the tag name itself when the description is
        blank or the name is not fully qualified; None when the tag does
        not exist or is not found in the service
    """
    # A nonexistent tag has no description to look up
    if not DoesIDExist(tag_name):
        warnings.warn("WARNING- " + tag_name + " does not exist or " +
                      "connection was dropped. Try again if tag does exist.")
        return None
    # Point information is queried per-service, so the fully-qualified
    # Site.Service.Tag form is required
    parts = tag_name.split(".")
    if len(parts) < 3:
        warnings.warn("WARNING- Please supply the full Site.Service.Tag.")
        return tag_name
    # The Site.Service is the first two dot-separated components;
    # GetPoints returns a DataFrame of all points in that service
    service_points = GetPoints(".".join(parts[:2]))
    if tag_name not in service_points.Tag.values:
        warnings.warn("WARNING- " + tag_name + " not found in service.")
        return None
    description = service_points[
        service_points.Tag == tag_name].Description.values[0]
    # Fall back to the tag name when the configured description is blank
    return description if description else tag_name
def HistAppendValues(site_service, tag_name, times, values, statuses):
    """
    Appends a value to an eDNA history service. Take very careful note of the
    following required parameters. Any deviation from this exact format WILL
    cause the function to fail.

    This function will append values to history, only if they are LATER than
    the current time of the last written data point. If this is not true, no
    data will be appended.

    This function is strongly preferred over HistUpdateInsertValues, which
    will slow down data retrieval if it is used too often.

    :param site_service: This is the history service for the eDNA tag, NOT
                         the site.service of the tag itself. For instance,
                         ANTARES.HISTORY, not ANTARES.ANVCALC
    :param tag_name: This is the full site.service.tag. For instance,
                     ANTARES.ANVCALC.ADE1CA02
    :param times: This is a Python array of times in UTC Epoch format.
                  For example, "1483926416" not "2016/01/01 01:01:01".
                  This must be an array.
    :param values: A Python array of data point values for each times.
    :param statuses: The status of the point. Refer to eDNA documentation
                     for more information. Usually use '3', which is 'OK'.
    """
    # Define all required variables in the correct ctypes format
    szService = c_char_p(site_service.encode('utf-8'))
    szPoint = c_char_p(tag_name.encode('utf-8'))
    nCount = c_ushort(1)
    # Iterate over each user-supplied data point, appending one at a time
    for dttime, value, status in zip(times, values, statuses):
        # Define all required variables in the correct ctypes format
        PtTimeList = c_long(dttime)
        PusStatusList = c_ushort(status)
        PszValueList = c_char_p(str(value).encode('utf-8'))
        szError = create_string_buffer(20)
        nError = c_ushort(20)
        # Call the history append function
        nRet = dna_dll.DnaHistAppendValues(szService, szPoint,
            nCount, byref(PtTimeList), byref(PusStatusList),
            byref(PszValueList), byref(szError), nError)
        # Surface API failures instead of silently discarding nRet/szError,
        # consistent with the warning style used elsewhere in this module
        if nRet != 0:
            warnings.warn("WARNING- eDNA API failed with code " + str(nRet) +
                          ": " + szError.value.decode(errors='ignore'))
def HistUpdateInsertValues(site_service, tag_name, times, values, statuses):
    """
    CAUTION- Use HistAppendValues instead of this function, unless you know
    what you are doing.

    Inserts a value to an eDNA history service. Take very careful note of the
    following required parameters. Any deviation from this exact format WILL
    cause the function to fail.

    :param site_service: This is the history service for the eDNA tag, NOT
                         the site.service of the tag itself. For instance,
                         ANTARES.HISTORY, not ANTARES.ANVCALC
    :param tag_name: This is the full site.service.tag. For instance,
                     ANTARES.ANVCALC.ADE1CA02
    :param times: This is a Python array of times in UTC Epoch format.
                  For example, "1483926416" not "2016/01/01 01:01:01".
                  This must be an array.
    :param values: A Python array of data point values for each times.
    :param statuses: The status of the point. Refer to eDNA documentation
                     for more information. Usually use '3', which is 'OK'.
    """
    # Define all required variables in the correct ctypes format
    szService = c_char_p(site_service.encode('utf-8'))
    szPoint = c_char_p(tag_name.encode('utf-8'))
    nCount = c_ushort(1)
    # Iterate over each user-supplied data point, inserting one at a time
    for dttime, value, status in zip(times, values, statuses):
        # Define all required variables in the correct ctypes format
        PtTimeList = c_long(dttime)
        PusStatusList = c_ushort(status)
        PszValueList = c_char_p(str(value).encode('utf-8'))
        szError = create_string_buffer(20)
        nError = c_ushort(20)
        # Call the history update/insert function
        nRet = dna_dll.DnaHistUpdateInsertValues(szService, szPoint,
            nCount, byref(PtTimeList), byref(PusStatusList),
            byref(PszValueList), byref(szError), nError)
        # Surface API failures instead of silently discarding nRet/szError,
        # consistent with the warning style used elsewhere in this module
        if nRet != 0:
            warnings.warn("WARNING- eDNA API failed with code " + str(nRet) +
                          ": " + szError.value.decode(errors='ignore'))
def SelectPoint():
    """
    Opens an eDNA point picker, where the user can select a single tag.

    :return: selected tag name
    """
    # Use a 30-character buffer, matching the point-name buffer size used
    # by GetPoints; the previous 20-character buffer could truncate longer
    # fully-qualified Site.Service.Tag names
    pszPoint = create_string_buffer(30)
    nPoint = c_ushort(30)
    # Opens the point picker; the user's selection is written into pszPoint
    dna_dll.DnaSelectPoint(byref(pszPoint), nPoint)
    tag_result = pszPoint.value.decode('utf-8')
    return tag_result
def StringToUTCTime(time_string):
    """
    Turns a DateTime string into UTC time.

    :param time_string: Must be the format "MM/dd/yy hh:mm:ss"
    :return: an integer representing the UTC int format
    """
    # The API takes the timestamp as a UTF-8 C string and returns an int
    encoded_time = c_char_p(time_string.encode('utf-8'))
    return dna_dll.StringToUTCTime(encoded_time)
# At the end of the module, we need to check that at least one eDNA service
# is connected. Otherwise, there is a problem with the eDNA connection.
# NOTE: this runs as a side effect of importing the module.
service_array = GetServices()
num_services = 0
if not service_array.empty:
    # num_services is rebound from the int 0 to a str for the message below
    num_services = str(len(service_array))
    print("Successfully connected to " + num_services + " eDNA services.")
# Cleanup the unnecessary variables so they do not linger in the module
# namespace (default_location is defined near the top of the file)
del(service_array, num_services, default_location)
|
drericstrong/pyedna | pyedna/ezdna.py | GetPoints | python | def GetPoints(edna_service):
"""
Obtains all the points in the edna_service, including real-time values.
:param edna_service: The full Site.Service name of the eDNA service.
:return: A pandas DataFrame of points in the form [Tag, Value, Time,
Description, Units]
"""
# Define all required variables in the correct ctypes format
szServiceName = c_char_p(edna_service.encode('utf-8'))
nStarting, pulKey, pdValue = c_ushort(0), c_ulong(0), c_double(-9999)
szPoint, szTime = create_string_buffer(30), create_string_buffer(30)
szStatus, szDesc = create_string_buffer(20), create_string_buffer(90)
szUnits = create_string_buffer(20)
szPoint2, szTime2 = create_string_buffer(30), create_string_buffer(30)
szStatus2, szDesc2 = create_string_buffer(20), create_string_buffer(90)
szUnits2, pdValue2 = create_string_buffer(20), c_double(-9999)
nPoint, nTime, nStatus = c_ushort(30), c_ushort(30), c_ushort(20)
nDesc, nUnits = c_ushort(90), c_ushort(20)
# Call the eDNA function. nRet is zero if the function is successful.
points = []
nRet = dna_dll.DnaGetPointEntry(szServiceName, nStarting, byref(pulKey),
byref(szPoint), nPoint, byref(pdValue), byref(szTime), nTime,
byref(szStatus), nStatus, byref(szDesc), nDesc, byref(szUnits), nUnits)
tag = _FormatPoints(szPoint, pdValue, szTime, szStatus, szDesc, szUnits)
if tag:
points.append(tag)
# Iterate across all the returned services
while nRet == 0:
nRet = dna_dll.DnaGetNextPointEntry(pulKey, byref(szPoint2), nPoint,
byref(pdValue2), byref(szTime2), nTime, byref(szStatus2), nStatus,
byref(szDesc2), nDesc, byref(szUnits2), nUnits)
# We want to ensure only UTF-8 characters are returned. Ignoring
# characters is slightly unsafe, but they should only occur in the
# units or description, so it's not a huge issue.
tag = _FormatPoints(szPoint2, pdValue2, szTime2, szStatus2, szDesc2,
szUnits2)
if tag:
points.append(tag)
# If no results were returned, raise a warning
df = pd.DataFrame()
if points:
df = pd.DataFrame(points, columns=["Tag", "Value", "Time", "Status",
"Description", "Units"])
else:
warnings.warn("WARNING- No points were returned. Check that the " +
"service exists and contains points.")
return df | Obtains all the points in the edna_service, including real-time values.
:param edna_service: The full Site.Service name of the eDNA service.
:return: A pandas DataFrame of points in the form [Tag, Value, Time,
Description, Units] | train | https://github.com/drericstrong/pyedna/blob/b8f8f52def4f26bb4f3a993ce3400769518385f6/pyedna/ezdna.py#L458-L508 | [
"def _FormatPoints(szPoint, pdValue, szTime, szStatus, szDesc, szUnits):\n # Returns an array of properly-formatted points from the GetPoints function\n tag = _format_str(szPoint.value.decode(errors='ignore'))\n value = pdValue.value\n time_ = _format_str(szTime.value.decode(errors='ignore'))\n status = _format_str(szStatus.value.decode(errors='ignore'))\n desc = _format_str(szDesc.value.decode(errors='ignore'))\n units = _format_str(szUnits.value.decode(errors='ignore'))\n if szPoint.value.strip():\n return [tag, value, time_, status, desc, units]\n"
] | # -*- coding: utf-8 -*-
"""
pyedna.ezdna
--------------
This module contains "easy" versions of common functions from the eDNA
C++ dll. Obtain a legal copy of the C++ eDNA dll for use.
:copyright: (c) 2017 Eric Strong.
:license: Refer to LICENSE.txt for more information.
"""
# Note- all functions are in CamelCase to match the original eDNA function
# names, even though this does not follow PEP 8.
import re
import os
import numba
import warnings
import numpy as np
import pandas as pd
from unittest.mock import Mock
from ctypes import cdll, byref, create_string_buffer
from ctypes import c_char_p, c_double, c_ushort, c_long, c_ulong
def _mock_edna():
# This function will mock all the methods that were used in the dna_dll.
# It's necessary so that documentation can be automatically created.
dna_dll = Mock()
attrs = {'DnaGetHistAvgUTC.return_value': c_ulong(1),
'DnaGetHistInterpUTC.return_value': c_ulong(1),
'DnaGetHistMinUTC.return_value': c_ulong(1),
'DnaGetHistMaxUTC.return_value': c_ulong(1),
'DnaGetHistSnapUTC.return_value': c_ulong(1),
'DnaGetHistRawUTC.return_value': c_ulong(1),
'DoesIdExist.return_value': c_ulong(1),
'DnaGetHSHistRawUTC.return_value': c_ulong(1),
'DnaGetNextHSHistUTC.return_value': c_ulong(1),
'DnaGetPointEntry.return_value': c_ulong(1),
'DnaGetNextPointEntry.return_value': c_ulong(1),
'DNAGetRTFull.return_value': c_ulong(1),
'DnaSelectPoint.return_value': c_ulong(1),
'StringToUTCTime.return_value': 1,
'DnaGetServiceEntry.return_value': c_ulong(1),
'DnaGetNextServiceEntry.return_value': c_ulong(1),
'DnaHistAppendValues.return_value': c_ulong(1),
'DnaHistUpdateInsertValues.return_value': c_ulong(1),
'DnaCancelHistRequest.return_value': None,
'DnaGetNextHistSmallUTC.return_value': c_ulong(1)}
dna_dll.configure_mock(**attrs)
return dna_dll
# This code should execute at the beginning of the module import, because
# all of the functions in this module require the dna_dll library to be
# loaded. See "LoadDll" if not in default location
default_location = "C:\\Program Files (x86)\\eDNA\\EzDnaApi64.dll"
if os.path.isfile(default_location):
dna_dll = cdll.LoadLibrary(default_location)
else:
warnings.warn("ERROR- no eDNA dll detected at " +
"C:\\Program Files (x86)\\eDNA\\EzDnaApi64.dll" +
" . Please manually load dll using the LoadDll function. " +
"Mocking dll, but all functions will fail until " +
"dll is manually loaded...")
dna_dll = _mock_edna()
# If the EzDnaApi file is not in the default location, the user must explicitly
# load it using the LoadDll function.
def LoadDll(location):
"""
If the EzDnaApi64.dll file is not in the default location
(C:\Program Files (x86)\eDNA\EzDnaApi64.dll) then the user must specify
the correct location of the file, before this module can be used.
:param location: the full location of EzDnaApi64.dll, including filename
"""
if os.path.isfile(location):
global dna_dll
dna_dll = cdll.LoadLibrary(location)
else:
raise Exception("ERROR- file does not exist at " + location)
def _format_str(text):
# Only allows a-z, 0-9, ., _, :, /, -, and spaces
if type(text) is str:
formatted_text = re.sub('[^-._:/\sA-Za-z0-9]+', '', text).strip()
return formatted_text
else:
return text
def DoesIDExist(tag_name):
"""
Determines if a fully-qualified site.service.tag eDNA tag exists
in any of the connected services.
:param tag_name: fully-qualified (site.service.tag) eDNA tag
:return: true if the point exists, false if the point does not exist
Example:
>>> DoesIDExist("Site.Service.Tag")
"""
# the eDNA API requires that the tag_name be specified in a binary format,
# and the ctypes library must be used to create a C++ variable type.
szPoint = c_char_p(tag_name.encode('utf-8'))
result = bool(dna_dll.DoesIdExist(szPoint))
return result
def GetHistAvg(tag_name, start_time, end_time, period,
desc_as_label=False, label=None):
"""
Retrieves data from eDNA history for a given tag. The data will be
averaged over the specified "period".
:param tag_name: fully-qualified (site.service.tag) eDNA tag
:param start_time: must be in format mm/dd/yy hh:mm:ss
:param end_time: must be in format mm/dd/yy hh:mm:ss
:param period: in units of seconds (e.g. 10)
:param desc_as_label: use the tag description as the column name instead
of the full tag
:param label: supply a custom label to use as the DataFrame column name
:return: a pandas DataFrame with timestamp, value, and status
"""
return GetHist(tag_name, start_time, end_time, mode="avg", period=period,
desc_as_label=desc_as_label, label=label)
def GetHistInterp(tag_name, start_time, end_time, period,
desc_as_label=False, label=None):
"""
Retrieves data from eDNA history for a given tag. The data will be
linearly interpolated over the specified "period".
:param tag_name: fully-qualified (site.service.tag) eDNA tag
:param start_time: must be in format mm/dd/yy hh:mm:ss
:param end_time: must be in format mm/dd/yy hh:mm:ss
:param period: in units of seconds (e.g. 10)
:param desc_as_label: use the tag description as the column name instead
of the full tag
:param label: supply a custom label to use as the DataFrame column name
:return: a pandas DataFrame with timestamp, value, and status
"""
return GetHist(tag_name, start_time, end_time, mode="interp",
period=period, desc_as_label=desc_as_label, label=label)
def GetHistMax(tag_name, start_time, end_time, period,
desc_as_label=False, label=None):
"""
Retrieves data from eDNA history for a given tag. The maximum of the data
will be found over the specified "period".
:param tag_name: fully-qualified (site.service.tag) eDNA tag
:param start_time: must be in format mm/dd/yy hh:mm:ss
:param end_time: must be in format mm/dd/yy hh:mm:ss
:param period: in units of seconds (e.g. 10)
:param desc_as_label: use the tag description as the column name instead
of the full tag
:param label: supply a custom label to use as the DataFrame column name
:return: a pandas DataFrame with timestamp, value, and status
"""
return GetHist(tag_name, start_time, end_time, mode="max",
period=period, desc_as_label=desc_as_label, label=label)
def GetHistMin(tag_name, start_time, end_time, period,
desc_as_label=False, label=None):
"""
Retrieves data from eDNA history for a given tag. The minimum of the data
will be found over the specified "period".
:param tag_name: fully-qualified (site.service.tag) eDNA tag
:param start_time: must be in format mm/dd/yy hh:mm:ss
:param end_time: must be in format mm/dd/yy hh:mm:ss
:param period: in units of seconds (e.g. 10)
:param desc_as_label: use the tag description as the column name instead
of the full tag
:param label: supply a custom label to use as the DataFrame column name
:return: a pandas DataFrame with timestamp, value, and status
"""
return GetHist(tag_name, start_time, end_time, mode="min",
period=period, desc_as_label=desc_as_label, label=label)
def GetHistRaw(tag_name, start_time, end_time, high_speed=False,
desc_as_label=False, label=None):
"""
Retrieves raw data from eDNA history for a given tag.
:param tag_name: fully-qualified (site.service.tag) eDNA tag
:param start_time: must be in format mm/dd/yy hh:mm:ss
:param end_time: must be in format mm/dd/yy hh:mm:ss
:param high_speed: true = pull milliseconds
:param desc_as_label: use the tag description as the column name instead
of the full tag
:param label: supply a custom label to use as the DataFrame column name
:return: a pandas DataFrame with timestamp, value, and status
"""
return GetHist(tag_name, start_time, end_time, mode="raw",
desc_as_label=desc_as_label, label=label)
def GetHistSnap(tag_name, start_time, end_time, period,
desc_as_label=False, label=None):
"""
Retrieves data from eDNA history for a given tag. The data will be
snapped to the last known value over intervals of the specified "period".
:param tag_name: fully-qualified (site.service.tag) eDNA tag
:param start_time: must be in format mm/dd/yy hh:mm:ss
:param end_time: must be in format mm/dd/yy hh:mm:ss
:param period: in units of seconds (e.g. 10)
:param desc_as_label: use the tag description as the column name instead
of the full tag
:param label: supply a custom label to use as the DataFrame column name
:return: a pandas DataFrame with timestamp, value, and status
"""
return GetHist(tag_name, start_time, end_time, mode="snap",
period=period, desc_as_label=desc_as_label, label=label)
def GetHist(tag_name, start_time, end_time, period=5, mode="raw",
desc_as_label=False, label=None, high_speed=False, utc=False):
"""
Retrieves data from eDNA history for a given tag.
:param tag_name: fully-qualified (site.service.tag) eDNA tag
:param start_time: must be in format mm/dd/yy hh:mm:ss
:param end_time: must be in format mm/dd/yy hh:mm:ss
:param period: specify the number of seconds for the pull interval
:param mode: "raw", "snap", "avg", "interp", "max", "min"
See eDNA documentation for more information.
:param desc_as_label: use the tag description as the column name instead
of the full tag
:param label: supply a custom label to use as the DataFrame column name
:param high_speed: if True, pull millisecond data
:param utc: if True, use the integer time format instead of DateTime
:return: a pandas DataFrame with timestamp, value, and status
"""
# Check if the point even exists
if not DoesIDExist(tag_name):
warnings.warn("WARNING- " + tag_name + " does not exist or " +
"connection was dropped. Try again if tag does exist.")
return pd.DataFrame()
# Define all required variables in the correct ctypes format
szPoint = c_char_p(tag_name.encode('utf-8'))
tStart = c_long(StringToUTCTime(start_time))
tEnd = c_long(StringToUTCTime(end_time))
tPeriod = c_long(period)
pulKey = c_ulong(0)
# Initialize the data pull using the specified pulKey, which is an
# identifier that tells eDNA which data pull is occurring
mode = mode.lower().strip()
if not high_speed:
if mode == "avg":
nRet = dna_dll.DnaGetHistAvgUTC(szPoint, tStart, tEnd, tPeriod, byref(pulKey))
if mode == "interp":
nRet = dna_dll.DnaGetHistInterpUTC(szPoint, tStart, tEnd, tPeriod, byref(pulKey))
if mode == "min":
nRet = dna_dll.DnaGetHistMinUTC(szPoint, tStart, tEnd, tPeriod, byref(pulKey))
if mode == "max":
nRet = dna_dll.DnaGetHistMaxUTC(szPoint, tStart, tEnd, tPeriod, byref(pulKey))
if mode == "snap":
nRet = dna_dll.DnaGetHistSnapUTC(szPoint, tStart, tEnd, tPeriod, byref(pulKey))
else:
nRet = dna_dll.DnaGetHistRawUTC(szPoint, tStart, tEnd, byref(pulKey))
time_, val, stat = _GetNextHistSmallUTC(pulKey, nRet)
else:
nStartMillis = c_ushort(0)
nEndMillis = c_ushort(0)
nRet = dna_dll.DnaGetHSHistRawUTC(szPoint, tStart, nStartMillis,
tEnd, nEndMillis, byref(pulKey))
time_, val, stat = _GetNextHSHistUTC(pulKey, nRet)
# The history request must be cancelled to free up network resources
dna_dll.DnaCancelHistRequest(pulKey)
# To construct the pandas DataFrame, the tag name will be used as the
# column name, and the index (which is in the strange eDNA format) must be
# converted to an actual DateTime
d = {tag_name + ' Status': stat, tag_name: val}
df = pd.DataFrame(data=d, index=time_)
if not utc:
if not high_speed:
df.index = pd.to_datetime(df.index, unit="s")
else:
df.index = pd.to_datetime(df.index, unit="ms")
if df.empty:
warnings.warn('WARNING- No data retrieved for ' + tag_name + '. ' +
'Check eDNA connection, ensure that the start time is ' +
'not later than the end time, verify that the ' +
'DateTime formatting matches eDNA requirements, and ' +
'check that data exists in the query time period.')
# Check if the user would rather use the description as the column name
if desc_as_label or label:
if label:
new_label = label
else:
new_label = _GetLabel(tag_name)
df.rename(inplace=True, columns={tag_name: new_label,
tag_name + " Status": new_label + " Status"})
return df
@numba.jit
def _GetNextHistSmallUTC(pulKey, nRet):
# This is a base function that iterates over a predefined history call,
# which may be raw, snap, max, min, etc.
pdValue, ptTime, pusStatus = c_double(-9999), c_long(-9999), c_ushort(0)
refVal, refTime, refStat = byref(pdValue), byref(ptTime), byref(pusStatus)
val = np.empty(0)
time_ = np.empty(0)
stat = np.empty(0)
# Once nRet is not zero, the function was terminated, either due to an
# error or due to the end of the data period.
nRet = dna_dll.DnaGetNextHistSmallUTC(pulKey, refVal, refTime, refStat)
while nRet == 0:
val = np.append(val, pdValue.value)
time_ = np.append(time_, ptTime.value)
stat = np.append(stat, pusStatus.value)
nRet = dna_dll.DnaGetNextHistSmallUTC(pulKey, refVal, refTime, refStat)
return time_, val, stat
@numba.jit
def _GetNextHSHistUTC(pulKey, nRet):
# This is a base function that iterates over a predefined history call,
# which may be raw, snap, max, min, etc.
pdValue, ptTime, pnMillis = c_double(-9999), c_long(-9999), c_ushort(0)
szStatus, nStatus = create_string_buffer(20), c_ushort(20)
refVal, refTime, refMillis = byref(pdValue), byref(ptTime), byref(pnMillis)
refStatus = byref(szStatus)
val = np.empty(0)
time_ = np.empty(0)
stat = np.empty(0)
# Once nRet is not zero, the function was terminated, either due to an
# error or due to the end of the data period.
nRet = dna_dll.DnaGetNextHSHistUTC(pulKey, refVal, refTime, refMillis,
refStatus, nStatus)
while nRet == 0:
val = np.append(val, pdValue.value)
time_ = np.append(time_, ptTime.value * 1000 + pnMillis.value)
stat = np.append(stat, 3)
nRet = dna_dll.DnaGetNextHSHistUTC(pulKey, refVal, refTime, refMillis,
refStatus, nStatus)
return time_, val, stat
def _GetLabel(tag_name):
# This function tries to get the tag description to use as the label for
# the variable in the pandas DataFrame. It removes any special characters
# and trims whitespace before and after. If the label is blank, the
# tag name will be returned again instead.
label = GetTagDescription(tag_name)
if label:
return label
else:
return tag_name
def GetMultipleTags(tag_list, start_time, end_time, sampling_rate=None,
fill_limit=99999, verify_time=False, desc_as_label=False,
utc=False):
"""
Retrieves raw data from eDNA history for multiple tags, merging them into
a single DataFrame, and resampling the data according to the specified
sampling_rate.
:param tag_list: a list of fully-qualified (site.service.tag) eDNA tags
:param start_time: must be in format mm/dd/yy hh:mm:ss
:param end_time: must be in format mm/dd/yy hh:mm:ss
:param sampling_rate: in units of seconds
:param fill_limit: in units of data points
:param verify_time: verify that the time is not before or after the query
:param desc_as_label: use the tag description as the column name instead
of the full tag
:param utc: if True, use the integer time format instead of DateTime
:return: a pandas DataFrame with timestamp and values
"""
# Since we are pulling data from multiple tags, let's iterate over each
# one. For this case, we only want to pull data using the "raw" method,
# which will obtain all data as it is actually stored in the historian.
dfs = []
columns_names = []
for tag in tag_list:
df = GetHist(tag, start_time, end_time, utc=utc)
if not df.empty:
# Sometimes a duplicate index/value pair is retrieved from
# eDNA, which will cause the concat to fail if not removed
# df.drop_duplicates(inplace=True)
df = df[~df.index.duplicated(keep='first')]
# If the user wants to use descriptions as labels, we need to
# ensure that only unique labels are used
label = tag
if desc_as_label:
orig_label = _GetLabel(tag)
label = orig_label
rename_number = 2
while label in columns_names:
label = orig_label + str(rename_number)
rename_number += 1
columns_names.append(label)
df.rename(columns={tag: label}, inplace=True)
# Add the DataFrame to the list, to be concatenated later
dfs.append(pd.DataFrame(df[label]))
# Next, we concatenate all the DataFrames using an outer join (default).
# Verify integrity is slow, but it ensures that the concatenation
# worked correctly.
if dfs:
merged_df = pd.concat(dfs, axis=1, verify_integrity=True)
merged_df = merged_df.fillna(method="ffill", limit=fill_limit)
else:
warnings.warn('WARNING- No data retrieved for any tags. ' +
'Check eDNA connection, ensure that the start time is ' +
'not later than the end time, verify that the ' +
'DateTime formatting matches eDNA requirements, and ' +
'check that data exists in the query time period.')
return pd.DataFrame()
# eDNA sometimes pulls data too early or too far- let's filter out all
# the data that is not within our original criteria.
if verify_time:
start_np = pd.to_datetime(start_time)
end_np = pd.to_datetime(end_time)
mask = (merged_df.index > start_np) & (merged_df.index <= end_np)
merged_df = merged_df.loc[mask]
# Finally, we resample the data at the rate requested by the user.
if sampling_rate:
sampling_string = str(sampling_rate) + "S"
merged_df = merged_df.resample(sampling_string).fillna(
method="ffill", limit=fill_limit)
return merged_df
def _FormatPoints(szPoint, pdValue, szTime, szStatus, szDesc, szUnits):
# Returns an array of properly-formatted points from the GetPoints function
tag = _format_str(szPoint.value.decode(errors='ignore'))
value = pdValue.value
time_ = _format_str(szTime.value.decode(errors='ignore'))
status = _format_str(szStatus.value.decode(errors='ignore'))
desc = _format_str(szDesc.value.decode(errors='ignore'))
units = _format_str(szUnits.value.decode(errors='ignore'))
if szPoint.value.strip():
return [tag, value, time_, status, desc, units]
def GetPoints(edna_service):
"""
Obtains all the points in the edna_service, including real-time values.
:param edna_service: The full Site.Service name of the eDNA service.
:return: A pandas DataFrame of points in the form [Tag, Value, Time,
Description, Units]
"""
# Define all required variables in the correct ctypes format
szServiceName = c_char_p(edna_service.encode('utf-8'))
nStarting, pulKey, pdValue = c_ushort(0), c_ulong(0), c_double(-9999)
szPoint, szTime = create_string_buffer(30), create_string_buffer(30)
szStatus, szDesc = create_string_buffer(20), create_string_buffer(90)
szUnits = create_string_buffer(20)
szPoint2, szTime2 = create_string_buffer(30), create_string_buffer(30)
szStatus2, szDesc2 = create_string_buffer(20), create_string_buffer(90)
szUnits2, pdValue2 = create_string_buffer(20), c_double(-9999)
nPoint, nTime, nStatus = c_ushort(30), c_ushort(30), c_ushort(20)
nDesc, nUnits = c_ushort(90), c_ushort(20)
# Call the eDNA function. nRet is zero if the function is successful.
points = []
nRet = dna_dll.DnaGetPointEntry(szServiceName, nStarting, byref(pulKey),
byref(szPoint), nPoint, byref(pdValue), byref(szTime), nTime,
byref(szStatus), nStatus, byref(szDesc), nDesc, byref(szUnits), nUnits)
tag = _FormatPoints(szPoint, pdValue, szTime, szStatus, szDesc, szUnits)
if tag:
points.append(tag)
# Iterate across all the returned services
while nRet == 0:
nRet = dna_dll.DnaGetNextPointEntry(pulKey, byref(szPoint2), nPoint,
byref(pdValue2), byref(szTime2), nTime, byref(szStatus2), nStatus,
byref(szDesc2), nDesc, byref(szUnits2), nUnits)
# We want to ensure only UTF-8 characters are returned. Ignoring
# characters is slightly unsafe, but they should only occur in the
# units or description, so it's not a huge issue.
tag = _FormatPoints(szPoint2, pdValue2, szTime2, szStatus2, szDesc2,
szUnits2)
if tag:
points.append(tag)
# If no results were returned, raise a warning
df = pd.DataFrame()
if points:
df = pd.DataFrame(points, columns=["Tag", "Value", "Time", "Status",
"Description", "Units"])
else:
warnings.warn("WARNING- No points were returned. Check that the " +
"service exists and contains points.")
return df
def GetRTFull(tag_name):
"""
Gets current information about a point configured in a real-time
eDNA service, including current value, time, status, description,
and units.
:param tag_name: fully-qualified (site.service.tag) eDNA tag
:return: tuple of: alue, time, status, statusint, description, units
"""
# Check if the point even exists
if not DoesIDExist(tag_name):
warnings.warn("WARNING- " + tag_name + " does not exist or " +
"connection was dropped. Try again if tag does exist.")
return None
# Define all required variables in the correct ctypes format
szPoint = c_char_p(tag_name.encode('utf-8'))
pdValue, ptTime = c_double(-9999), c_long(-9999)
szValue, szTime = create_string_buffer(20), create_string_buffer(20)
szStatus, szDesc = create_string_buffer(20), create_string_buffer(20)
szUnits = create_string_buffer(20)
nValue, nTime, nStatus = c_ushort(20), c_ushort(20), c_ushort(20)
pusStatus, nDesc, nUnits = c_ushort(0), c_ushort(0), c_ushort(0)
# Call the eDNA function. nRet is zero if the function is successful
nRet = dna_dll.DNAGetRTFull(szPoint, byref(pdValue), byref(szValue),
nValue, byref(ptTime), byref(szTime), nTime, byref(pusStatus),
byref(szStatus), nStatus, byref(szDesc), nDesc, byref(szUnits), nUnits)
# Check to make sure the function returned correctly. If not, return None
if nRet == 0:
return ([pdValue.value, szTime.value.decode('utf-8'),
szStatus.value.decode('utf-8'), pusStatus.value,
szDesc.value.decode('utf-8'), szUnits.value.decode('utf-8')])
else:
warnings.warn("WARNING- eDNA API failed with code " + str(nRet))
return None
def _FormatServices(szSvcName, szSvcDesc, szSvcType, szSvcStat):
# Returns an array of properly-formatted services from the
# GetServices function
name = _format_str(szSvcName.value.decode(errors='ignore'))
desc = _format_str(szSvcDesc.value.decode(errors='ignore'))
type_ = _format_str(szSvcType.value.decode(errors='ignore'))
status = _format_str(szSvcStat.value.decode(errors='ignore'))
if name:
return [name, desc, type_, status]
def GetServices():
    """
    Obtains all the connected eDNA services.

    :return: A pandas DataFrame of connected eDNA services in the form [Name,
        Description, Type, Status]; empty DataFrame (with a warning) if no
        services are connected
    """
    # Define all required variables in the correct ctypes format.
    # pulKey is an opaque handle that identifies this enumeration to the
    # eDNA API across the Get/GetNext calls; the empty szType and
    # szStartSvcName request all services from the beginning.
    pulKey = c_ulong(0)
    szType = c_char_p("".encode('utf-8'))
    szStartSvcName = c_char_p("".encode('utf-8'))
    # Two sets of output buffers: the first set receives the initial entry,
    # the "...2" set is reused for every subsequent entry in the loop below.
    szSvcName, szSvcDesc = create_string_buffer(30), create_string_buffer(90)
    szSvcType, szSvcStat = create_string_buffer(30), create_string_buffer(30)
    szSvcName2, szSvcDesc2 = create_string_buffer(30), create_string_buffer(90)
    szSvcType2, szSvcStat2 = create_string_buffer(30), create_string_buffer(30)
    # Buffer sizes passed to the API, matching the allocations above
    nSvcName, nSvcDesc = c_ushort(30), c_ushort(90)
    nSvcType, nSvcStat = c_ushort(30), c_ushort(30)
    # Call the eDNA function. nRet is zero if the function is successful.
    services = []
    nRet = dna_dll.DnaGetServiceEntry(szType, szStartSvcName, byref(pulKey),
        byref(szSvcName), nSvcName, byref(szSvcDesc), nSvcDesc,
        byref(szSvcType), nSvcType, byref(szSvcStat), nSvcStat)
    # _FormatServices returns None for a blank service name, so empty
    # first entries are silently skipped
    serv = _FormatServices(szSvcName, szSvcDesc, szSvcType, szSvcStat)
    if serv:
        services.append(serv)
    # Iterate across all the returned services until the API signals a
    # non-zero return code (end of enumeration or error)
    while nRet == 0:
        nRet = dna_dll.DnaGetNextServiceEntry(pulKey,
            byref(szSvcName2), nSvcName, byref(szSvcDesc2), nSvcDesc,
            byref(szSvcType2), nSvcType, byref(szSvcStat2), nSvcStat)
        # We want to ensure only UTF-8 characters are returned. Ignoring
        # characters is slightly unsafe, but they should only occur in the
        # units or description, so it's not a huge issue.
        serv = _FormatServices(szSvcName2, szSvcDesc2, szSvcType2, szSvcStat2)
        if serv:
            services.append(serv)
    # If no results were returned, raise a warning
    df = pd.DataFrame()
    if services:
        df = pd.DataFrame(services, columns=["Name", "Description", "Type",
                                             "Status"])
    else:
        warnings.warn("WARNING- No connected eDNA services detected. Check " +
                      "your DNASys.ini file and your network connection.")
    return df
def GetTagDescription(tag_name):
    """
    Gets the current description of a point configured in a real-time eDNA
    service.

    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :return: the tag description; the tag name itself if the description is
        blank or the name is not fully qualified; None if the tag does not
        exist or is not found in its service
    """
    # Bail out early if the point does not exist at all
    if not DoesIDExist(tag_name):
        warnings.warn("WARNING- " + tag_name + " does not exist or " +
                      "connection was dropped. Try again if tag does exist.")
        return None
    # A fully-qualified tag has at least three dot-separated parts
    parts = tag_name.split(".")
    if len(parts) < 3:
        warnings.warn("WARNING- Please supply the full Site.Service.Tag.")
        return tag_name
    # The Site.Service prefix is formed from the first two parts; GetPoints
    # returns a DataFrame with point information for that service
    points = GetPoints(".".join(parts[:2]))
    if tag_name not in points.Tag.values:
        warnings.warn("WARNING- " + tag_name + " not found in service.")
        return None
    description = points.loc[points.Tag == tag_name, "Description"].values[0]
    # Fall back to the tag name when the description is blank
    return description if description else tag_name
def HistAppendValues(site_service, tag_name, times, values, statuses):
    """
    Appends a value to an eDNA history service. Take very careful note of the
    following required parameters. Any deviation from this exact format WILL
    cause the function to fail.

    This function will append values to history, only if they are LATER than
    the current time of the last written data point. If this is not true, no
    data will be appended.

    This value is strongly preferred over HistUpdateInsertValues, which will
    slow down data retrieval if it is used too often.

    :param site_service: This is the history service for the eDNA tag, NOT
        the site.service of the tag itself. For instance,
        ANTARES.HISTORY, not ANTARES.ANVCALC
    :param tag_name: This is the full site.service.tag. For instance,
        ANTARES.ANVCALC.ADE1CA02
    :param times: This is a Python array of times in UTC Epoch format.
        For example, "1483926416" not "2016/01/01 01:01:01".
        This must be an array.
    :param values: A Python array of data point values for each times.
    :param statuses: The status of the point. Refer to eDNA documentation
        for more information. Usually use '3', which is 'OK'.
    """
    # Encode the service and tag once; they are the same for every point
    szService = c_char_p(site_service.encode('utf-8'))
    szPoint = c_char_p(tag_name.encode('utf-8'))
    # Exactly one point is written per API call
    nCount = c_ushort(1)
    # Write each (time, value, status) triple individually
    for utc_time, point_value, point_status in zip(times, values, statuses):
        tTime = c_long(utc_time)
        usStatus = c_ushort(point_status)
        szValue = c_char_p(str(point_value).encode('utf-8'))
        # Fresh error buffer for each call
        szError, nError = create_string_buffer(20), c_ushort(20)
        # Call the history append function; the return code is not checked
        dna_dll.DnaHistAppendValues(szService, szPoint, nCount,
            byref(tTime), byref(usStatus), byref(szValue),
            byref(szError), nError)
def HistUpdateInsertValues(site_service, tag_name, times, values, statuses):
    """
    CAUTION- Use HistAppendValues instead of this function, unless you know
    what you are doing.

    Inserts a value to an eDNA history service. Take very careful note of the
    following required parameters. Any deviation from this exact format WILL
    cause the function to fail.

    :param site_service: This is the history service for the eDNA tag, NOT
        the site.service of the tag itself. For instance,
        ANTARES.HISTORY, not ANTARES.ANVCALC
    :param tag_name: This is the full site.service.tag. For instance,
        ANTARES.ANVCALC.ADE1CA02
    :param times: This is a Python array of times in UTC Epoch format.
        For example, "1483926416" not "2016/01/01 01:01:01".
        This must be an array.
    :param values: A Python array of data point values for each times.
    :param statuses: The status of the point. Refer to eDNA documentation
        for more information. Usually use '3', which is 'OK'.
    """
    # Encode the service and tag once; they are the same for every point
    szService = c_char_p(site_service.encode('utf-8'))
    szPoint = c_char_p(tag_name.encode('utf-8'))
    # Exactly one point is written per API call
    nCount = c_ushort(1)
    # Insert each (time, value, status) triple individually
    for utc_time, point_value, point_status in zip(times, values, statuses):
        tTime = c_long(utc_time)
        usStatus = c_ushort(point_status)
        szValue = c_char_p(str(point_value).encode('utf-8'))
        # Fresh error buffer for each call
        szError, nError = create_string_buffer(20), c_ushort(20)
        # Call the history update/insert function; the return code is not
        # checked
        dna_dll.DnaHistUpdateInsertValues(szService, szPoint, nCount,
            byref(tTime), byref(usStatus), byref(szValue),
            byref(szError), nError)
def SelectPoint():
    """
    Opens an eDNA point picker, where the user can select a single tag.

    :return: selected tag name
    """
    # 20-byte output buffer that receives the user's selection
    buffer, buffer_size = create_string_buffer(20), c_ushort(20)
    # Opens the point picker dialog; blocks until the user chooses
    dna_dll.DnaSelectPoint(byref(buffer), buffer_size)
    return buffer.value.decode('utf-8')
def StringToUTCTime(time_string):
    """
    Turns a DateTime string into UTC time.

    :param time_string: Must be the format "MM/dd/yy hh:mm:ss"
    :return: an integer representing the UTC int format
    """
    # The API expects a C string, so encode before the call
    encoded_time = c_char_p(time_string.encode('utf-8'))
    return dna_dll.StringToUTCTime(encoded_time)
# At the end of the module, we need to check that at least one eDNA service
# is connected. Otherwise, there is a problem with the eDNA connection.
service_array = GetServices()
num_services = 0
if not service_array.empty:
    # NOTE: num_services is rebound to a str here for the message below
    num_services = str(len(service_array))
    print("Successfully connected to " + num_services + " eDNA services.")
# Cleanup the unnecessary variables so they do not leak into the module
# namespace (default_location is defined near the top of this module)
del(service_array, num_services, default_location)
|
drericstrong/pyedna | pyedna/ezdna.py | GetRTFull | python | def GetRTFull(tag_name):
"""
Gets current information about a point configured in a real-time
eDNA service, including current value, time, status, description,
and units.
:param tag_name: fully-qualified (site.service.tag) eDNA tag
:return: tuple of: alue, time, status, statusint, description, units
"""
# Check if the point even exists
if not DoesIDExist(tag_name):
warnings.warn("WARNING- " + tag_name + " does not exist or " +
"connection was dropped. Try again if tag does exist.")
return None
# Define all required variables in the correct ctypes format
szPoint = c_char_p(tag_name.encode('utf-8'))
pdValue, ptTime = c_double(-9999), c_long(-9999)
szValue, szTime = create_string_buffer(20), create_string_buffer(20)
szStatus, szDesc = create_string_buffer(20), create_string_buffer(20)
szUnits = create_string_buffer(20)
nValue, nTime, nStatus = c_ushort(20), c_ushort(20), c_ushort(20)
pusStatus, nDesc, nUnits = c_ushort(0), c_ushort(0), c_ushort(0)
# Call the eDNA function. nRet is zero if the function is successful
nRet = dna_dll.DNAGetRTFull(szPoint, byref(pdValue), byref(szValue),
nValue, byref(ptTime), byref(szTime), nTime, byref(pusStatus),
byref(szStatus), nStatus, byref(szDesc), nDesc, byref(szUnits), nUnits)
# Check to make sure the function returned correctly. If not, return None
if nRet == 0:
return ([pdValue.value, szTime.value.decode('utf-8'),
szStatus.value.decode('utf-8'), pusStatus.value,
szDesc.value.decode('utf-8'), szUnits.value.decode('utf-8')])
else:
warnings.warn("WARNING- eDNA API failed with code " + str(nRet))
return None | Gets current information about a point configured in a real-time
eDNA service, including current value, time, status, description,
and units.
:param tag_name: fully-qualified (site.service.tag) eDNA tag
:return: tuple of: alue, time, status, statusint, description, units | train | https://github.com/drericstrong/pyedna/blob/b8f8f52def4f26bb4f3a993ce3400769518385f6/pyedna/ezdna.py#L511-L547 | [
"def DoesIDExist(tag_name):\n \"\"\"\n Determines if a fully-qualified site.service.tag eDNA tag exists\n in any of the connected services.\n\n :param tag_name: fully-qualified (site.service.tag) eDNA tag\n :return: true if the point exists, false if the point does not exist\n\n Example:\n\n >>> DoesIDExist(\"Site.Service.Tag\")\n\n \"\"\"\n # the eDNA API requires that the tag_name be specified in a binary format,\n # and the ctypes library must be used to create a C++ variable type.\n szPoint = c_char_p(tag_name.encode('utf-8'))\n result = bool(dna_dll.DoesIdExist(szPoint))\n return result\n"
] | # -*- coding: utf-8 -*-
"""
pyedna.ezdna
--------------
This module contains "easy" versions of common functions from the eDNA
C++ dll. Obtain a legal copy of the C++ eDNA dll for use.
:copyright: (c) 2017 Eric Strong.
:license: Refer to LICENSE.txt for more information.
"""
# Note- all functions are in CamelCase to match the original eDNA function
# names, even though this does not follow PEP 8.
import re
import os
import numba
import warnings
import numpy as np
import pandas as pd
from unittest.mock import Mock
from ctypes import cdll, byref, create_string_buffer
from ctypes import c_char_p, c_double, c_ushort, c_long, c_ulong
def _mock_edna():
# This function will mock all the methods that were used in the dna_dll.
# It's necessary so that documentation can be automatically created.
dna_dll = Mock()
attrs = {'DnaGetHistAvgUTC.return_value': c_ulong(1),
'DnaGetHistInterpUTC.return_value': c_ulong(1),
'DnaGetHistMinUTC.return_value': c_ulong(1),
'DnaGetHistMaxUTC.return_value': c_ulong(1),
'DnaGetHistSnapUTC.return_value': c_ulong(1),
'DnaGetHistRawUTC.return_value': c_ulong(1),
'DoesIdExist.return_value': c_ulong(1),
'DnaGetHSHistRawUTC.return_value': c_ulong(1),
'DnaGetNextHSHistUTC.return_value': c_ulong(1),
'DnaGetPointEntry.return_value': c_ulong(1),
'DnaGetNextPointEntry.return_value': c_ulong(1),
'DNAGetRTFull.return_value': c_ulong(1),
'DnaSelectPoint.return_value': c_ulong(1),
'StringToUTCTime.return_value': 1,
'DnaGetServiceEntry.return_value': c_ulong(1),
'DnaGetNextServiceEntry.return_value': c_ulong(1),
'DnaHistAppendValues.return_value': c_ulong(1),
'DnaHistUpdateInsertValues.return_value': c_ulong(1),
'DnaCancelHistRequest.return_value': None,
'DnaGetNextHistSmallUTC.return_value': c_ulong(1)}
dna_dll.configure_mock(**attrs)
return dna_dll
# This code should execute at the beginning of the module import, because
# all of the functions in this module require the dna_dll library to be
# loaded. See "LoadDll" if not in default location
default_location = "C:\\Program Files (x86)\\eDNA\\EzDnaApi64.dll"
if os.path.isfile(default_location):
dna_dll = cdll.LoadLibrary(default_location)
else:
warnings.warn("ERROR- no eDNA dll detected at " +
"C:\\Program Files (x86)\\eDNA\\EzDnaApi64.dll" +
" . Please manually load dll using the LoadDll function. " +
"Mocking dll, but all functions will fail until " +
"dll is manually loaded...")
dna_dll = _mock_edna()
# If the EzDnaApi file is not in the default location, the user must explicitly
# load it using the LoadDll function.
def LoadDll(location):
"""
If the EzDnaApi64.dll file is not in the default location
(C:\Program Files (x86)\eDNA\EzDnaApi64.dll) then the user must specify
the correct location of the file, before this module can be used.
:param location: the full location of EzDnaApi64.dll, including filename
"""
if os.path.isfile(location):
global dna_dll
dna_dll = cdll.LoadLibrary(location)
else:
raise Exception("ERROR- file does not exist at " + location)
def _format_str(text):
# Only allows a-z, 0-9, ., _, :, /, -, and spaces
if type(text) is str:
formatted_text = re.sub('[^-._:/\sA-Za-z0-9]+', '', text).strip()
return formatted_text
else:
return text
def DoesIDExist(tag_name):
"""
Determines if a fully-qualified site.service.tag eDNA tag exists
in any of the connected services.
:param tag_name: fully-qualified (site.service.tag) eDNA tag
:return: true if the point exists, false if the point does not exist
Example:
>>> DoesIDExist("Site.Service.Tag")
"""
# the eDNA API requires that the tag_name be specified in a binary format,
# and the ctypes library must be used to create a C++ variable type.
szPoint = c_char_p(tag_name.encode('utf-8'))
result = bool(dna_dll.DoesIdExist(szPoint))
return result
def GetHistAvg(tag_name, start_time, end_time, period,
desc_as_label=False, label=None):
"""
Retrieves data from eDNA history for a given tag. The data will be
averaged over the specified "period".
:param tag_name: fully-qualified (site.service.tag) eDNA tag
:param start_time: must be in format mm/dd/yy hh:mm:ss
:param end_time: must be in format mm/dd/yy hh:mm:ss
:param period: in units of seconds (e.g. 10)
:param desc_as_label: use the tag description as the column name instead
of the full tag
:param label: supply a custom label to use as the DataFrame column name
:return: a pandas DataFrame with timestamp, value, and status
"""
return GetHist(tag_name, start_time, end_time, mode="avg", period=period,
desc_as_label=desc_as_label, label=label)
def GetHistInterp(tag_name, start_time, end_time, period,
desc_as_label=False, label=None):
"""
Retrieves data from eDNA history for a given tag. The data will be
linearly interpolated over the specified "period".
:param tag_name: fully-qualified (site.service.tag) eDNA tag
:param start_time: must be in format mm/dd/yy hh:mm:ss
:param end_time: must be in format mm/dd/yy hh:mm:ss
:param period: in units of seconds (e.g. 10)
:param desc_as_label: use the tag description as the column name instead
of the full tag
:param label: supply a custom label to use as the DataFrame column name
:return: a pandas DataFrame with timestamp, value, and status
"""
return GetHist(tag_name, start_time, end_time, mode="interp",
period=period, desc_as_label=desc_as_label, label=label)
def GetHistMax(tag_name, start_time, end_time, period,
desc_as_label=False, label=None):
"""
Retrieves data from eDNA history for a given tag. The maximum of the data
will be found over the specified "period".
:param tag_name: fully-qualified (site.service.tag) eDNA tag
:param start_time: must be in format mm/dd/yy hh:mm:ss
:param end_time: must be in format mm/dd/yy hh:mm:ss
:param period: in units of seconds (e.g. 10)
:param desc_as_label: use the tag description as the column name instead
of the full tag
:param label: supply a custom label to use as the DataFrame column name
:return: a pandas DataFrame with timestamp, value, and status
"""
return GetHist(tag_name, start_time, end_time, mode="max",
period=period, desc_as_label=desc_as_label, label=label)
def GetHistMin(tag_name, start_time, end_time, period,
desc_as_label=False, label=None):
"""
Retrieves data from eDNA history for a given tag. The minimum of the data
will be found over the specified "period".
:param tag_name: fully-qualified (site.service.tag) eDNA tag
:param start_time: must be in format mm/dd/yy hh:mm:ss
:param end_time: must be in format mm/dd/yy hh:mm:ss
:param period: in units of seconds (e.g. 10)
:param desc_as_label: use the tag description as the column name instead
of the full tag
:param label: supply a custom label to use as the DataFrame column name
:return: a pandas DataFrame with timestamp, value, and status
"""
return GetHist(tag_name, start_time, end_time, mode="min",
period=period, desc_as_label=desc_as_label, label=label)
def GetHistRaw(tag_name, start_time, end_time, high_speed=False,
desc_as_label=False, label=None):
"""
Retrieves raw data from eDNA history for a given tag.
:param tag_name: fully-qualified (site.service.tag) eDNA tag
:param start_time: must be in format mm/dd/yy hh:mm:ss
:param end_time: must be in format mm/dd/yy hh:mm:ss
:param high_speed: true = pull milliseconds
:param desc_as_label: use the tag description as the column name instead
of the full tag
:param label: supply a custom label to use as the DataFrame column name
:return: a pandas DataFrame with timestamp, value, and status
"""
return GetHist(tag_name, start_time, end_time, mode="raw",
desc_as_label=desc_as_label, label=label)
def GetHistSnap(tag_name, start_time, end_time, period,
desc_as_label=False, label=None):
"""
Retrieves data from eDNA history for a given tag. The data will be
snapped to the last known value over intervals of the specified "period".
:param tag_name: fully-qualified (site.service.tag) eDNA tag
:param start_time: must be in format mm/dd/yy hh:mm:ss
:param end_time: must be in format mm/dd/yy hh:mm:ss
:param period: in units of seconds (e.g. 10)
:param desc_as_label: use the tag description as the column name instead
of the full tag
:param label: supply a custom label to use as the DataFrame column name
:return: a pandas DataFrame with timestamp, value, and status
"""
return GetHist(tag_name, start_time, end_time, mode="snap",
period=period, desc_as_label=desc_as_label, label=label)
def GetHist(tag_name, start_time, end_time, period=5, mode="raw",
desc_as_label=False, label=None, high_speed=False, utc=False):
"""
Retrieves data from eDNA history for a given tag.
:param tag_name: fully-qualified (site.service.tag) eDNA tag
:param start_time: must be in format mm/dd/yy hh:mm:ss
:param end_time: must be in format mm/dd/yy hh:mm:ss
:param period: specify the number of seconds for the pull interval
:param mode: "raw", "snap", "avg", "interp", "max", "min"
See eDNA documentation for more information.
:param desc_as_label: use the tag description as the column name instead
of the full tag
:param label: supply a custom label to use as the DataFrame column name
:param high_speed: if True, pull millisecond data
:param utc: if True, use the integer time format instead of DateTime
:return: a pandas DataFrame with timestamp, value, and status
"""
# Check if the point even exists
if not DoesIDExist(tag_name):
warnings.warn("WARNING- " + tag_name + " does not exist or " +
"connection was dropped. Try again if tag does exist.")
return pd.DataFrame()
# Define all required variables in the correct ctypes format
szPoint = c_char_p(tag_name.encode('utf-8'))
tStart = c_long(StringToUTCTime(start_time))
tEnd = c_long(StringToUTCTime(end_time))
tPeriod = c_long(period)
pulKey = c_ulong(0)
# Initialize the data pull using the specified pulKey, which is an
# identifier that tells eDNA which data pull is occurring
mode = mode.lower().strip()
if not high_speed:
if mode == "avg":
nRet = dna_dll.DnaGetHistAvgUTC(szPoint, tStart, tEnd, tPeriod, byref(pulKey))
if mode == "interp":
nRet = dna_dll.DnaGetHistInterpUTC(szPoint, tStart, tEnd, tPeriod, byref(pulKey))
if mode == "min":
nRet = dna_dll.DnaGetHistMinUTC(szPoint, tStart, tEnd, tPeriod, byref(pulKey))
if mode == "max":
nRet = dna_dll.DnaGetHistMaxUTC(szPoint, tStart, tEnd, tPeriod, byref(pulKey))
if mode == "snap":
nRet = dna_dll.DnaGetHistSnapUTC(szPoint, tStart, tEnd, tPeriod, byref(pulKey))
else:
nRet = dna_dll.DnaGetHistRawUTC(szPoint, tStart, tEnd, byref(pulKey))
time_, val, stat = _GetNextHistSmallUTC(pulKey, nRet)
else:
nStartMillis = c_ushort(0)
nEndMillis = c_ushort(0)
nRet = dna_dll.DnaGetHSHistRawUTC(szPoint, tStart, nStartMillis,
tEnd, nEndMillis, byref(pulKey))
time_, val, stat = _GetNextHSHistUTC(pulKey, nRet)
# The history request must be cancelled to free up network resources
dna_dll.DnaCancelHistRequest(pulKey)
# To construct the pandas DataFrame, the tag name will be used as the
# column name, and the index (which is in the strange eDNA format) must be
# converted to an actual DateTime
d = {tag_name + ' Status': stat, tag_name: val}
df = pd.DataFrame(data=d, index=time_)
if not utc:
if not high_speed:
df.index = pd.to_datetime(df.index, unit="s")
else:
df.index = pd.to_datetime(df.index, unit="ms")
if df.empty:
warnings.warn('WARNING- No data retrieved for ' + tag_name + '. ' +
'Check eDNA connection, ensure that the start time is ' +
'not later than the end time, verify that the ' +
'DateTime formatting matches eDNA requirements, and ' +
'check that data exists in the query time period.')
# Check if the user would rather use the description as the column name
if desc_as_label or label:
if label:
new_label = label
else:
new_label = _GetLabel(tag_name)
df.rename(inplace=True, columns={tag_name: new_label,
tag_name + " Status": new_label + " Status"})
return df
@numba.jit
def _GetNextHistSmallUTC(pulKey, nRet):
# This is a base function that iterates over a predefined history call,
# which may be raw, snap, max, min, etc.
pdValue, ptTime, pusStatus = c_double(-9999), c_long(-9999), c_ushort(0)
refVal, refTime, refStat = byref(pdValue), byref(ptTime), byref(pusStatus)
val = np.empty(0)
time_ = np.empty(0)
stat = np.empty(0)
# Once nRet is not zero, the function was terminated, either due to an
# error or due to the end of the data period.
nRet = dna_dll.DnaGetNextHistSmallUTC(pulKey, refVal, refTime, refStat)
while nRet == 0:
val = np.append(val, pdValue.value)
time_ = np.append(time_, ptTime.value)
stat = np.append(stat, pusStatus.value)
nRet = dna_dll.DnaGetNextHistSmallUTC(pulKey, refVal, refTime, refStat)
return time_, val, stat
@numba.jit
def _GetNextHSHistUTC(pulKey, nRet):
# This is a base function that iterates over a predefined history call,
# which may be raw, snap, max, min, etc.
pdValue, ptTime, pnMillis = c_double(-9999), c_long(-9999), c_ushort(0)
szStatus, nStatus = create_string_buffer(20), c_ushort(20)
refVal, refTime, refMillis = byref(pdValue), byref(ptTime), byref(pnMillis)
refStatus = byref(szStatus)
val = np.empty(0)
time_ = np.empty(0)
stat = np.empty(0)
# Once nRet is not zero, the function was terminated, either due to an
# error or due to the end of the data period.
nRet = dna_dll.DnaGetNextHSHistUTC(pulKey, refVal, refTime, refMillis,
refStatus, nStatus)
while nRet == 0:
val = np.append(val, pdValue.value)
time_ = np.append(time_, ptTime.value * 1000 + pnMillis.value)
stat = np.append(stat, 3)
nRet = dna_dll.DnaGetNextHSHistUTC(pulKey, refVal, refTime, refMillis,
refStatus, nStatus)
return time_, val, stat
def _GetLabel(tag_name):
# This function tries to get the tag description to use as the label for
# the variable in the pandas DataFrame. It removes any special characters
# and trims whitespace before and after. If the label is blank, the
# tag name will be returned again instead.
label = GetTagDescription(tag_name)
if label:
return label
else:
return tag_name
def GetMultipleTags(tag_list, start_time, end_time, sampling_rate=None,
fill_limit=99999, verify_time=False, desc_as_label=False,
utc=False):
"""
Retrieves raw data from eDNA history for multiple tags, merging them into
a single DataFrame, and resampling the data according to the specified
sampling_rate.
:param tag_list: a list of fully-qualified (site.service.tag) eDNA tags
:param start_time: must be in format mm/dd/yy hh:mm:ss
:param end_time: must be in format mm/dd/yy hh:mm:ss
:param sampling_rate: in units of seconds
:param fill_limit: in units of data points
:param verify_time: verify that the time is not before or after the query
:param desc_as_label: use the tag description as the column name instead
of the full tag
:param utc: if True, use the integer time format instead of DateTime
:return: a pandas DataFrame with timestamp and values
"""
# Since we are pulling data from multiple tags, let's iterate over each
# one. For this case, we only want to pull data using the "raw" method,
# which will obtain all data as it is actually stored in the historian.
dfs = []
columns_names = []
for tag in tag_list:
df = GetHist(tag, start_time, end_time, utc=utc)
if not df.empty:
# Sometimes a duplicate index/value pair is retrieved from
# eDNA, which will cause the concat to fail if not removed
# df.drop_duplicates(inplace=True)
df = df[~df.index.duplicated(keep='first')]
# If the user wants to use descriptions as labels, we need to
# ensure that only unique labels are used
label = tag
if desc_as_label:
orig_label = _GetLabel(tag)
label = orig_label
rename_number = 2
while label in columns_names:
label = orig_label + str(rename_number)
rename_number += 1
columns_names.append(label)
df.rename(columns={tag: label}, inplace=True)
# Add the DataFrame to the list, to be concatenated later
dfs.append(pd.DataFrame(df[label]))
# Next, we concatenate all the DataFrames using an outer join (default).
# Verify integrity is slow, but it ensures that the concatenation
# worked correctly.
if dfs:
merged_df = pd.concat(dfs, axis=1, verify_integrity=True)
merged_df = merged_df.fillna(method="ffill", limit=fill_limit)
else:
warnings.warn('WARNING- No data retrieved for any tags. ' +
'Check eDNA connection, ensure that the start time is ' +
'not later than the end time, verify that the ' +
'DateTime formatting matches eDNA requirements, and ' +
'check that data exists in the query time period.')
return pd.DataFrame()
# eDNA sometimes pulls data too early or too far- let's filter out all
# the data that is not within our original criteria.
if verify_time:
start_np = pd.to_datetime(start_time)
end_np = pd.to_datetime(end_time)
mask = (merged_df.index > start_np) & (merged_df.index <= end_np)
merged_df = merged_df.loc[mask]
# Finally, we resample the data at the rate requested by the user.
if sampling_rate:
sampling_string = str(sampling_rate) + "S"
merged_df = merged_df.resample(sampling_string).fillna(
method="ffill", limit=fill_limit)
return merged_df
def _FormatPoints(szPoint, pdValue, szTime, szStatus, szDesc, szUnits):
# Returns an array of properly-formatted points from the GetPoints function
tag = _format_str(szPoint.value.decode(errors='ignore'))
value = pdValue.value
time_ = _format_str(szTime.value.decode(errors='ignore'))
status = _format_str(szStatus.value.decode(errors='ignore'))
desc = _format_str(szDesc.value.decode(errors='ignore'))
units = _format_str(szUnits.value.decode(errors='ignore'))
if szPoint.value.strip():
return [tag, value, time_, status, desc, units]
def GetPoints(edna_service):
"""
Obtains all the points in the edna_service, including real-time values.
:param edna_service: The full Site.Service name of the eDNA service.
:return: A pandas DataFrame of points in the form [Tag, Value, Time,
Description, Units]
"""
# Define all required variables in the correct ctypes format
szServiceName = c_char_p(edna_service.encode('utf-8'))
nStarting, pulKey, pdValue = c_ushort(0), c_ulong(0), c_double(-9999)
szPoint, szTime = create_string_buffer(30), create_string_buffer(30)
szStatus, szDesc = create_string_buffer(20), create_string_buffer(90)
szUnits = create_string_buffer(20)
szPoint2, szTime2 = create_string_buffer(30), create_string_buffer(30)
szStatus2, szDesc2 = create_string_buffer(20), create_string_buffer(90)
szUnits2, pdValue2 = create_string_buffer(20), c_double(-9999)
nPoint, nTime, nStatus = c_ushort(30), c_ushort(30), c_ushort(20)
nDesc, nUnits = c_ushort(90), c_ushort(20)
# Call the eDNA function. nRet is zero if the function is successful.
points = []
nRet = dna_dll.DnaGetPointEntry(szServiceName, nStarting, byref(pulKey),
byref(szPoint), nPoint, byref(pdValue), byref(szTime), nTime,
byref(szStatus), nStatus, byref(szDesc), nDesc, byref(szUnits), nUnits)
tag = _FormatPoints(szPoint, pdValue, szTime, szStatus, szDesc, szUnits)
if tag:
points.append(tag)
# Iterate across all the returned services
while nRet == 0:
nRet = dna_dll.DnaGetNextPointEntry(pulKey, byref(szPoint2), nPoint,
byref(pdValue2), byref(szTime2), nTime, byref(szStatus2), nStatus,
byref(szDesc2), nDesc, byref(szUnits2), nUnits)
# We want to ensure only UTF-8 characters are returned. Ignoring
# characters is slightly unsafe, but they should only occur in the
# units or description, so it's not a huge issue.
tag = _FormatPoints(szPoint2, pdValue2, szTime2, szStatus2, szDesc2,
szUnits2)
if tag:
points.append(tag)
# If no results were returned, raise a warning
df = pd.DataFrame()
if points:
df = pd.DataFrame(points, columns=["Tag", "Value", "Time", "Status",
"Description", "Units"])
else:
warnings.warn("WARNING- No points were returned. Check that the " +
"service exists and contains points.")
return df
def GetRTFull(tag_name):
"""
Gets current information about a point configured in a real-time
eDNA service, including current value, time, status, description,
and units.
:param tag_name: fully-qualified (site.service.tag) eDNA tag
:return: tuple of: alue, time, status, statusint, description, units
"""
# Check if the point even exists
if not DoesIDExist(tag_name):
warnings.warn("WARNING- " + tag_name + " does not exist or " +
"connection was dropped. Try again if tag does exist.")
return None
# Define all required variables in the correct ctypes format
szPoint = c_char_p(tag_name.encode('utf-8'))
pdValue, ptTime = c_double(-9999), c_long(-9999)
szValue, szTime = create_string_buffer(20), create_string_buffer(20)
szStatus, szDesc = create_string_buffer(20), create_string_buffer(20)
szUnits = create_string_buffer(20)
nValue, nTime, nStatus = c_ushort(20), c_ushort(20), c_ushort(20)
pusStatus, nDesc, nUnits = c_ushort(0), c_ushort(0), c_ushort(0)
# Call the eDNA function. nRet is zero if the function is successful
nRet = dna_dll.DNAGetRTFull(szPoint, byref(pdValue), byref(szValue),
nValue, byref(ptTime), byref(szTime), nTime, byref(pusStatus),
byref(szStatus), nStatus, byref(szDesc), nDesc, byref(szUnits), nUnits)
# Check to make sure the function returned correctly. If not, return None
if nRet == 0:
return ([pdValue.value, szTime.value.decode('utf-8'),
szStatus.value.decode('utf-8'), pusStatus.value,
szDesc.value.decode('utf-8'), szUnits.value.decode('utf-8')])
else:
warnings.warn("WARNING- eDNA API failed with code " + str(nRet))
return None
def _FormatServices(szSvcName, szSvcDesc, szSvcType, szSvcStat):
# Returns an array of properly-formatted services from the
# GetServices function
name = _format_str(szSvcName.value.decode(errors='ignore'))
desc = _format_str(szSvcDesc.value.decode(errors='ignore'))
type_ = _format_str(szSvcType.value.decode(errors='ignore'))
status = _format_str(szSvcStat.value.decode(errors='ignore'))
if name:
return [name, desc, type_, status]
def GetServices():
    """
    Obtains all the connected eDNA services.
    :return: A pandas DataFrame of connected eDNA services in the form [Name,
        Description, Type, Status]; empty (with a warning) if none are found
    """
    # Define all required variables in the correct ctypes format.
    # Empty szType / szStartSvcName ask the API for every service type,
    # starting from the first available service.
    pulKey = c_ulong(0)
    szType = c_char_p("".encode('utf-8'))
    szStartSvcName = c_char_p("".encode('utf-8'))
    # Two buffer sets: one for the first entry, a second reused while
    # iterating the remaining entries
    szSvcName, szSvcDesc = create_string_buffer(30), create_string_buffer(90)
    szSvcType, szSvcStat = create_string_buffer(30), create_string_buffer(30)
    szSvcName2, szSvcDesc2 = create_string_buffer(30), create_string_buffer(90)
    szSvcType2, szSvcStat2 = create_string_buffer(30), create_string_buffer(30)
    nSvcName, nSvcDesc = c_ushort(30), c_ushort(90)
    nSvcType, nSvcStat = c_ushort(30), c_ushort(30)
    # Call the eDNA function. nRet is zero if the function is successful.
    services = []
    nRet = dna_dll.DnaGetServiceEntry(szType, szStartSvcName, byref(pulKey),
        byref(szSvcName), nSvcName, byref(szSvcDesc), nSvcDesc,
        byref(szSvcType), nSvcType, byref(szSvcStat), nSvcStat)
    serv = _FormatServices(szSvcName, szSvcDesc, szSvcType, szSvcStat)
    if serv:
        services.append(serv)
    # Iterate across all the returned services (pulKey identifies this
    # iteration session to the dll)
    while nRet == 0:
        nRet = dna_dll.DnaGetNextServiceEntry(pulKey,
            byref(szSvcName2), nSvcName, byref(szSvcDesc2), nSvcDesc,
            byref(szSvcType2), nSvcType, byref(szSvcStat2), nSvcStat)
        # We want to ensure only UTF-8 characters are returned. Ignoring
        # characters is slightly unsafe, but they should only occur in the
        # units or description, so it's not a huge issue.
        serv = _FormatServices(szSvcName2, szSvcDesc2, szSvcType2, szSvcStat2)
        if serv:
            services.append(serv)
    # If no results were returned, raise a warning
    df = pd.DataFrame()
    if services:
        df = pd.DataFrame(services, columns=["Name", "Description", "Type",
                                             "Status"])
    else:
        warnings.warn("WARNING- No connected eDNA services detected. Check " +
                      "your DNASys.ini file and your network connection.")
    return df
def GetTagDescription(tag_name):
    """
    Gets the current description of a point configured in a real-time eDNA
    service.
    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :return: tag description
    """
    # Bail out early when the tag cannot be resolved at all
    if not DoesIDExist(tag_name):
        warnings.warn("WARNING- " + tag_name + " does not exist or " +
                      "connection was dropped. Try again if tag does exist.")
        return None
    # The Site.Service prefix is needed to query the service's point list
    parts = tag_name.split(".")
    if len(parts) < 3:
        warnings.warn("WARNING- Please supply the full Site.Service.Tag.")
        return tag_name
    # GetPoints returns a DataFrame describing every point in the service
    point_df = GetPoints(".".join(parts[:2]))
    if tag_name not in point_df.Tag.values:
        warnings.warn("WARNING- " + tag_name + " not found in service.")
        return None
    # Fall back to the tag name itself when the description is blank
    desc = point_df[point_df.Tag == tag_name].Description.values[0]
    return desc if desc else tag_name
def HistAppendValues(site_service, tag_name, times, values, statuses):
    """
    Appends a value to an eDNA history service. Take very careful note of the
    following required parameters. Any deviation from this exact format WILL
    cause the function to fail.
    This function will append values to history, only if they are LATER than
    the current time of the last written data point. If this is not true, no
    data will be appended.
    This value is strongly preferred over HistUpdateInsertValues, which will
    slow down data retrieval if it is used too often.
    :param site_service: This is the history service for the eDNA tag, NOT
                         the site.service of the tag itself. For instance,
                         ANTARES.HISTORY, not ANTARES.ANVCALC
    :param tag_name: This is the full site.service.tag. For instance,
                     ANTARES.ANVCALC.ADE1CA02
    :param times: This is a Python array of times in UTC Epoch format.
                  For example, "1483926416" not "2016/01/01 01:01:01".
                  This must be an array.
    :param values: A Python array of data point values for each times.
    :param statuses: The status of the point. Refer to eDNA documentation
                     for more information. Usually use '3', which is 'OK'.
    """
    # Define all required variables in the correct ctypes format
    szService = c_char_p(site_service.encode('utf-8'))
    szPoint = c_char_p(tag_name.encode('utf-8'))
    # One point is written per API call, so the batch count is fixed at 1
    nCount = c_ushort(1)
    # Iterate over each user-supplied data point
    for dttime, value, status in zip(times, values, statuses):
        # Define all required variables in the correct ctypes format
        PtTimeList = c_long(dttime)
        PusStatusList = c_ushort(status)
        PszValueList = c_char_p(str(value).encode('utf-8'))
        # 20-byte buffer for any error text the dll writes back
        szError = create_string_buffer(20)
        nError = c_ushort(20)
        # Call the history append file
        # NOTE(review): nRet and szError are never inspected, so a failed
        # append is silent -- consider surfacing the return code.
        nRet = dna_dll.DnaHistAppendValues(szService, szPoint,
            nCount, byref(PtTimeList), byref(PusStatusList),
            byref(PszValueList), byref(szError), nError)
def HistUpdateInsertValues(site_service, tag_name, times, values, statuses):
    """
    CAUTION- Use HistAppendValues instead of this function, unless you know
    what you are doing.
    Inserts a value to an eDNA history service. Take very careful note of the
    following required parameters. Any deviation from this exact format WILL
    cause the function to fail.
    :param site_service: This is the history service for the eDNA tag, NOT
                         the site.service of the tag itself. For instance,
                         ANTARES.HISTORY, not ANTARES.ANVCALC
    :param tag_name: This is the full site.service.tag. For instance,
                     ANTARES.ANVCALC.ADE1CA02
    :param times: This is a Python array of times in UTC Epoch format.
                  For example, "1483926416" not "2016/01/01 01:01:01".
                  This must be an array.
    :param values: A Python array of data point values for each times.
    :param statuses: The status of the point. Refer to eDNA documentation
                     for more information. Usually use '3', which is 'OK'.
    """
    # Define all required variables in the correct ctypes format
    szService = c_char_p(site_service.encode('utf-8'))
    szPoint = c_char_p(tag_name.encode('utf-8'))
    # One point is written per API call, so the batch count is fixed at 1
    nCount = c_ushort(1)
    # Iterate over each user-supplied data point
    for dttime, value, status in zip(times, values, statuses):
        # Define all required variables in the correct ctypes format
        PtTimeList = c_long(dttime)
        PusStatusList = c_ushort(status)
        PszValueList = c_char_p(str(value).encode('utf-8'))
        # 20-byte buffer for any error text the dll writes back
        szError = create_string_buffer(20)
        nError = c_ushort(20)
        # Call the history append file
        # NOTE(review): nRet and szError are never inspected, so a failed
        # insert is silent -- consider surfacing the return code.
        nRet = dna_dll.DnaHistUpdateInsertValues(szService, szPoint,
            nCount, byref(PtTimeList), byref(PusStatusList),
            byref(PszValueList), byref(szError), nError)
def SelectPoint():
    """
    Opens an eDNA point picker, where the user can select a single tag.
    :return: selected tag name
    """
    # A 20-byte buffer receives the user's selection from the picker dialog
    # NOTE(review): tags longer than 20 characters may be truncated here.
    pszPoint, nPoint = create_string_buffer(20), c_ushort(20)
    dna_dll.DnaSelectPoint(byref(pszPoint), nPoint)
    # Decode the raw C buffer into a Python string
    return pszPoint.value.decode('utf-8')
def StringToUTCTime(time_string):
    """
    Turns a DateTime string into UTC time.
    :param time_string: Must be the format "MM/dd/yy hh:mm:ss"
    :return: an integer representing the UTC int format
    """
    # The dll does the parsing; ctypes returns the result as a Python int
    szTime = c_char_p(time_string.encode('utf-8'))
    res = dna_dll.StringToUTCTime(szTime)
    return res
# At the end of the module, we need to check that at least one eDNA service
# is connected. Otherwise, there is a problem with the eDNA connection.
service_array = GetServices()
num_services = 0
if not service_array.empty:
    # num_services becomes a string here only for the message below
    num_services = str(len(service_array))
    print("Successfully connected to " + num_services + " eDNA services.")
# Cleanup the unnecessary variables so they do not leak into the module
# namespace (default_location was defined at the top of the file)
del(service_array, num_services, default_location)
|
drericstrong/pyedna | pyedna/ezdna.py | GetServices | python | def GetServices():
"""
Obtains all the connected eDNA services.
:return: A pandas DataFrame of connected eDNA services in the form [Name,
Description, Type, Status]
"""
# Define all required variables in the correct ctypes format
pulKey = c_ulong(0)
szType = c_char_p("".encode('utf-8'))
szStartSvcName = c_char_p("".encode('utf-8'))
szSvcName, szSvcDesc = create_string_buffer(30), create_string_buffer(90)
szSvcType, szSvcStat = create_string_buffer(30), create_string_buffer(30)
szSvcName2, szSvcDesc2 = create_string_buffer(30), create_string_buffer(90)
szSvcType2, szSvcStat2 = create_string_buffer(30), create_string_buffer(30)
nSvcName, nSvcDesc = c_ushort(30), c_ushort(90)
nSvcType, nSvcStat = c_ushort(30), c_ushort(30)
# Call the eDNA function. nRet is zero if the function is successful.
services = []
nRet = dna_dll.DnaGetServiceEntry(szType, szStartSvcName, byref(pulKey),
byref(szSvcName), nSvcName, byref(szSvcDesc), nSvcDesc,
byref(szSvcType), nSvcType, byref(szSvcStat), nSvcStat)
serv = _FormatServices(szSvcName, szSvcDesc, szSvcType, szSvcStat)
if serv:
services.append(serv)
# Iterate across all the returned services
while nRet == 0:
nRet = dna_dll.DnaGetNextServiceEntry(pulKey,
byref(szSvcName2), nSvcName, byref(szSvcDesc2), nSvcDesc,
byref(szSvcType2), nSvcType, byref(szSvcStat2), nSvcStat)
# We want to ensure only UTF-8 characters are returned. Ignoring
# characters is slightly unsafe, but they should only occur in the
# units or description, so it's not a huge issue.
serv = _FormatServices(szSvcName2, szSvcDesc2, szSvcType2, szSvcStat2)
if serv:
services.append(serv)
# If no results were returned, raise a warning
df = pd.DataFrame()
if services:
df = pd.DataFrame(services, columns=["Name", "Description", "Type",
"Status"])
else:
warnings.warn("WARNING- No connected eDNA services detected. Check " +
"your DNASys.ini file and your network connection.")
return df | Obtains all the connected eDNA services.
:return: A pandas DataFrame of connected eDNA services in the form [Name,
Description, Type, Status] | train | https://github.com/drericstrong/pyedna/blob/b8f8f52def4f26bb4f3a993ce3400769518385f6/pyedna/ezdna.py#L559-L606 | [
"def _FormatServices(szSvcName, szSvcDesc, szSvcType, szSvcStat):\n # Returns an array of properly-formatted services from the\n # GetServices function\n name = _format_str(szSvcName.value.decode(errors='ignore'))\n desc = _format_str(szSvcDesc.value.decode(errors='ignore'))\n type_ = _format_str(szSvcType.value.decode(errors='ignore'))\n status = _format_str(szSvcStat.value.decode(errors='ignore'))\n if name:\n return [name, desc, type_, status]\n"
] | # -*- coding: utf-8 -*-
"""
pyedna.ezdna
--------------
This module contains "easy" versions of common functions from the eDNA
C++ dll. Obtain a legal copy of the C++ eDNA dll for use.
:copyright: (c) 2017 Eric Strong.
:license: Refer to LICENSE.txt for more information.
"""
# Note- all functions are in CamelCase to match the original eDNA function
# names, even though this does not follow PEP 8.
import re
import os
import numba
import warnings
import numpy as np
import pandas as pd
from unittest.mock import Mock
from ctypes import cdll, byref, create_string_buffer
from ctypes import c_char_p, c_double, c_ushort, c_long, c_ulong
def _mock_edna():
# This function will mock all the methods that were used in the dna_dll.
# It's necessary so that documentation can be automatically created.
dna_dll = Mock()
attrs = {'DnaGetHistAvgUTC.return_value': c_ulong(1),
'DnaGetHistInterpUTC.return_value': c_ulong(1),
'DnaGetHistMinUTC.return_value': c_ulong(1),
'DnaGetHistMaxUTC.return_value': c_ulong(1),
'DnaGetHistSnapUTC.return_value': c_ulong(1),
'DnaGetHistRawUTC.return_value': c_ulong(1),
'DoesIdExist.return_value': c_ulong(1),
'DnaGetHSHistRawUTC.return_value': c_ulong(1),
'DnaGetNextHSHistUTC.return_value': c_ulong(1),
'DnaGetPointEntry.return_value': c_ulong(1),
'DnaGetNextPointEntry.return_value': c_ulong(1),
'DNAGetRTFull.return_value': c_ulong(1),
'DnaSelectPoint.return_value': c_ulong(1),
'StringToUTCTime.return_value': 1,
'DnaGetServiceEntry.return_value': c_ulong(1),
'DnaGetNextServiceEntry.return_value': c_ulong(1),
'DnaHistAppendValues.return_value': c_ulong(1),
'DnaHistUpdateInsertValues.return_value': c_ulong(1),
'DnaCancelHistRequest.return_value': None,
'DnaGetNextHistSmallUTC.return_value': c_ulong(1)}
dna_dll.configure_mock(**attrs)
return dna_dll
# This code should execute at the beginning of the module import, because
# all of the functions in this module require the dna_dll library to be
# loaded. See "LoadDll" if not in default location.
# If the dll is missing, a Mock replaces it so the import still succeeds;
# every API call will then be a no-op until LoadDll is called.
default_location = "C:\\Program Files (x86)\\eDNA\\EzDnaApi64.dll"
if os.path.isfile(default_location):
    dna_dll = cdll.LoadLibrary(default_location)
else:
    warnings.warn("ERROR- no eDNA dll detected at " +
                  "C:\\Program Files (x86)\\eDNA\\EzDnaApi64.dll" +
                  " . Please manually load dll using the LoadDll function. " +
                  "Mocking dll, but all functions will fail until " +
                  "dll is manually loaded...")
    dna_dll = _mock_edna()
# If the EzDnaApi file is not in the default location, the user must explicitly
# load it using the LoadDll function.
def LoadDll(location):
    """
    If the EzDnaApi64.dll file is not in the default location
    (C:\Program Files (x86)\eDNA\EzDnaApi64.dll) then the user must specify
    the correct location of the file, before this module can be used.
    :param location: the full location of EzDnaApi64.dll, including filename
    :raises FileNotFoundError: if no file exists at the given location
    """
    if os.path.isfile(location):
        # Rebind the module-level dll handle used by every other function
        global dna_dll
        dna_dll = cdll.LoadLibrary(location)
    else:
        # FileNotFoundError is more precise than the generic Exception the
        # original raised, and remains backward-compatible (it is still an
        # Exception subclass for callers using a broad except clause).
        raise FileNotFoundError("ERROR- file does not exist at " + location)
def _format_str(text):
# Only allows a-z, 0-9, ., _, :, /, -, and spaces
if type(text) is str:
formatted_text = re.sub('[^-._:/\sA-Za-z0-9]+', '', text).strip()
return formatted_text
else:
return text
def DoesIDExist(tag_name):
    """
    Determines if a fully-qualified site.service.tag eDNA tag exists
    in any of the connected services.
    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :return: true if the point exists, false if the point does not exist
    Example:
    >>> DoesIDExist("Site.Service.Tag")
    """
    # the eDNA API requires that the tag_name be specified in a binary format,
    # and the ctypes library must be used to create a C++ variable type.
    szPoint = c_char_p(tag_name.encode('utf-8'))
    # The dll returns a nonzero int when the tag exists; cast to bool
    result = bool(dna_dll.DoesIdExist(szPoint))
    return result
def GetHistAvg(tag_name, start_time, end_time, period,
               desc_as_label=False, label=None):
    """
    Pulls eDNA history for a tag, averaged over fixed intervals. This is a
    thin convenience wrapper around GetHist with mode="avg".
    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :param start_time: must be in format mm/dd/yy hh:mm:ss
    :param end_time: must be in format mm/dd/yy hh:mm:ss
    :param period: averaging interval, in seconds (e.g. 10)
    :param desc_as_label: use the tag description as the column name instead
        of the full tag
    :param label: supply a custom label to use as the DataFrame column name
    :return: a pandas DataFrame with timestamp, value, and status
    """
    return GetHist(tag_name, start_time, end_time, period=period,
                   mode="avg", desc_as_label=desc_as_label, label=label)
def GetHistInterp(tag_name, start_time, end_time, period,
                  desc_as_label=False, label=None):
    """
    Pulls eDNA history for a tag, linearly interpolated onto a fixed
    interval. This is a thin convenience wrapper around GetHist with
    mode="interp".
    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :param start_time: must be in format mm/dd/yy hh:mm:ss
    :param end_time: must be in format mm/dd/yy hh:mm:ss
    :param period: interpolation interval, in seconds (e.g. 10)
    :param desc_as_label: use the tag description as the column name instead
        of the full tag
    :param label: supply a custom label to use as the DataFrame column name
    :return: a pandas DataFrame with timestamp, value, and status
    """
    return GetHist(tag_name, start_time, end_time, period=period,
                   mode="interp", desc_as_label=desc_as_label, label=label)
def GetHistMax(tag_name, start_time, end_time, period,
               desc_as_label=False, label=None):
    """
    Pulls eDNA history for a tag, taking the maximum over each fixed
    interval. This is a thin convenience wrapper around GetHist with
    mode="max".
    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :param start_time: must be in format mm/dd/yy hh:mm:ss
    :param end_time: must be in format mm/dd/yy hh:mm:ss
    :param period: aggregation interval, in seconds (e.g. 10)
    :param desc_as_label: use the tag description as the column name instead
        of the full tag
    :param label: supply a custom label to use as the DataFrame column name
    :return: a pandas DataFrame with timestamp, value, and status
    """
    return GetHist(tag_name, start_time, end_time, period=period,
                   mode="max", desc_as_label=desc_as_label, label=label)
def GetHistMin(tag_name, start_time, end_time, period,
               desc_as_label=False, label=None):
    """
    Pulls eDNA history for a tag, taking the minimum over each fixed
    interval. This is a thin convenience wrapper around GetHist with
    mode="min".
    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :param start_time: must be in format mm/dd/yy hh:mm:ss
    :param end_time: must be in format mm/dd/yy hh:mm:ss
    :param period: aggregation interval, in seconds (e.g. 10)
    :param desc_as_label: use the tag description as the column name instead
        of the full tag
    :param label: supply a custom label to use as the DataFrame column name
    :return: a pandas DataFrame with timestamp, value, and status
    """
    return GetHist(tag_name, start_time, end_time, period=period,
                   mode="min", desc_as_label=desc_as_label, label=label)
def GetHistRaw(tag_name, start_time, end_time, high_speed=False,
               desc_as_label=False, label=None):
    """
    Retrieves raw data from eDNA history for a given tag.
    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :param start_time: must be in format mm/dd/yy hh:mm:ss
    :param end_time: must be in format mm/dd/yy hh:mm:ss
    :param high_speed: true = pull milliseconds
    :param desc_as_label: use the tag description as the column name instead
        of the full tag
    :param label: supply a custom label to use as the DataFrame column name
    :return: a pandas DataFrame with timestamp, value, and status
    """
    # BUG FIX: high_speed was previously accepted but never forwarded to
    # GetHist, so millisecond pulls silently fell back to a standard pull.
    return GetHist(tag_name, start_time, end_time, mode="raw",
                   high_speed=high_speed, desc_as_label=desc_as_label,
                   label=label)
def GetHistSnap(tag_name, start_time, end_time, period,
                desc_as_label=False, label=None):
    """
    Pulls eDNA history for a tag, snapped to the last known value over each
    fixed interval. This is a thin convenience wrapper around GetHist with
    mode="snap".
    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :param start_time: must be in format mm/dd/yy hh:mm:ss
    :param end_time: must be in format mm/dd/yy hh:mm:ss
    :param period: snap interval, in seconds (e.g. 10)
    :param desc_as_label: use the tag description as the column name instead
        of the full tag
    :param label: supply a custom label to use as the DataFrame column name
    :return: a pandas DataFrame with timestamp, value, and status
    """
    return GetHist(tag_name, start_time, end_time, period=period,
                   mode="snap", desc_as_label=desc_as_label, label=label)
def GetHist(tag_name, start_time, end_time, period=5, mode="raw",
            desc_as_label=False, label=None, high_speed=False, utc=False):
    """
    Retrieves data from eDNA history for a given tag.
    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :param start_time: must be in format mm/dd/yy hh:mm:ss
    :param end_time: must be in format mm/dd/yy hh:mm:ss
    :param period: specify the number of seconds for the pull interval
    :param mode: "raw", "snap", "avg", "interp", "max", "min"
        See eDNA documentation for more information.
    :param desc_as_label: use the tag description as the column name instead
        of the full tag
    :param label: supply a custom label to use as the DataFrame column name
    :param high_speed: if True, pull millisecond data
    :param utc: if True, use the integer time format instead of DateTime
    :return: a pandas DataFrame with timestamp, value, and status
    """
    # Check if the point even exists
    if not DoesIDExist(tag_name):
        warnings.warn("WARNING- " + tag_name + " does not exist or " +
                      "connection was dropped. Try again if tag does exist.")
        return pd.DataFrame()
    # Define all required variables in the correct ctypes format
    szPoint = c_char_p(tag_name.encode('utf-8'))
    tStart = c_long(StringToUTCTime(start_time))
    tEnd = c_long(StringToUTCTime(end_time))
    tPeriod = c_long(period)
    pulKey = c_ulong(0)
    # Initialize the data pull using the specified pulKey, which is an
    # identifier that tells eDNA which data pull is occurring
    mode = mode.lower().strip()
    if not high_speed:
        # BUG FIX: the original used independent "if" statements with the
        # "else" bound only to the final ("snap") test, so every mode other
        # than "snap"/"raw" ALSO initiated a raw pull that overwrote pulKey
        # (returning raw data and leaking the first request). The elif
        # chain guarantees exactly one history request is initiated.
        if mode == "avg":
            nRet = dna_dll.DnaGetHistAvgUTC(szPoint, tStart, tEnd, tPeriod, byref(pulKey))
        elif mode == "interp":
            nRet = dna_dll.DnaGetHistInterpUTC(szPoint, tStart, tEnd, tPeriod, byref(pulKey))
        elif mode == "min":
            nRet = dna_dll.DnaGetHistMinUTC(szPoint, tStart, tEnd, tPeriod, byref(pulKey))
        elif mode == "max":
            nRet = dna_dll.DnaGetHistMaxUTC(szPoint, tStart, tEnd, tPeriod, byref(pulKey))
        elif mode == "snap":
            nRet = dna_dll.DnaGetHistSnapUTC(szPoint, tStart, tEnd, tPeriod, byref(pulKey))
        else:
            # "raw" (and any unrecognized mode) falls back to the raw pull
            nRet = dna_dll.DnaGetHistRawUTC(szPoint, tStart, tEnd, byref(pulKey))
        time_, val, stat = _GetNextHistSmallUTC(pulKey, nRet)
    else:
        # High-speed pulls carry millisecond resolution
        nStartMillis = c_ushort(0)
        nEndMillis = c_ushort(0)
        nRet = dna_dll.DnaGetHSHistRawUTC(szPoint, tStart, nStartMillis,
                                          tEnd, nEndMillis, byref(pulKey))
        time_, val, stat = _GetNextHSHistUTC(pulKey, nRet)
    # The history request must be cancelled to free up network resources
    dna_dll.DnaCancelHistRequest(pulKey)
    # Construct the DataFrame: the tag name becomes the column name, and
    # the eDNA epoch index is converted to DateTime unless utc=True
    d = {tag_name + ' Status': stat, tag_name: val}
    df = pd.DataFrame(data=d, index=time_)
    if not utc:
        if not high_speed:
            df.index = pd.to_datetime(df.index, unit="s")
        else:
            df.index = pd.to_datetime(df.index, unit="ms")
    if df.empty:
        warnings.warn('WARNING- No data retrieved for ' + tag_name + '. ' +
                      'Check eDNA connection, ensure that the start time is ' +
                      'not later than the end time, verify that the ' +
                      'DateTime formatting matches eDNA requirements, and ' +
                      'check that data exists in the query time period.')
    # Check if the user would rather use the description as the column name
    if desc_as_label or label:
        new_label = label if label else _GetLabel(tag_name)
        df.rename(inplace=True, columns={tag_name: new_label,
                                         tag_name + " Status": new_label + " Status"})
    return df
@numba.jit
def _GetNextHistSmallUTC(pulKey, nRet):
    # Iterates over a previously-initiated history request (raw, snap, avg,
    # min, max, ...) identified by pulKey, collecting every returned point
    # until the dll signals the end of data with a nonzero return code.
    # The incoming nRet argument is kept for signature compatibility but is
    # immediately overwritten, as in the original implementation.
    # PERF: points are accumulated in Python lists and converted to numpy
    # arrays once at the end; the previous np.append-per-point approach
    # reallocated every array on each iteration (O(n^2) overall).
    pdValue, ptTime, pusStatus = c_double(-9999), c_long(-9999), c_ushort(0)
    refVal, refTime, refStat = byref(pdValue), byref(ptTime), byref(pusStatus)
    vals, times, stats = [], [], []
    nRet = dna_dll.DnaGetNextHistSmallUTC(pulKey, refVal, refTime, refStat)
    while nRet == 0:
        vals.append(pdValue.value)
        times.append(ptTime.value)
        stats.append(pusStatus.value)
        nRet = dna_dll.DnaGetNextHistSmallUTC(pulKey, refVal, refTime, refStat)
    # dtype=float matches the arrays the original np.append code produced
    return (np.array(times, dtype=float), np.array(vals, dtype=float),
            np.array(stats, dtype=float))
@numba.jit
def _GetNextHSHistUTC(pulKey, nRet):
    # Iterates over a previously-initiated high-speed (millisecond) history
    # request identified by pulKey, collecting every returned point until
    # the dll signals the end of data with a nonzero return code.
    # The incoming nRet argument is kept for signature compatibility but is
    # immediately overwritten, as in the original implementation.
    # PERF: points are accumulated in Python lists and converted to numpy
    # arrays once at the end; the previous np.append-per-point approach
    # reallocated every array on each iteration (O(n^2) overall).
    pdValue, ptTime, pnMillis = c_double(-9999), c_long(-9999), c_ushort(0)
    szStatus, nStatus = create_string_buffer(20), c_ushort(20)
    refVal, refTime, refMillis = byref(pdValue), byref(ptTime), byref(pnMillis)
    refStatus = byref(szStatus)
    vals, times, stats = [], [], []
    nRet = dna_dll.DnaGetNextHSHistUTC(pulKey, refVal, refTime, refMillis,
                                       refStatus, nStatus)
    while nRet == 0:
        vals.append(pdValue.value)
        # Fold seconds and milliseconds into one millisecond timestamp
        times.append(ptTime.value * 1000 + pnMillis.value)
        # NOTE(review): status is hard-coded to 3 ("OK") for high-speed
        # pulls and the szStatus buffer is ignored, as in the original.
        stats.append(3)
        nRet = dna_dll.DnaGetNextHSHistUTC(pulKey, refVal, refTime, refMillis,
                                           refStatus, nStatus)
    # dtype=float matches the arrays the original np.append code produced
    return (np.array(times, dtype=float), np.array(vals, dtype=float),
            np.array(stats, dtype=float))
def _GetLabel(tag_name):
    # Resolve a human-readable DataFrame column label for a tag: prefer the
    # eDNA tag description, falling back to the tag name itself when the
    # description is empty or unavailable.
    desc = GetTagDescription(tag_name)
    return desc if desc else tag_name
def GetMultipleTags(tag_list, start_time, end_time, sampling_rate=None,
                    fill_limit=99999, verify_time=False, desc_as_label=False,
                    utc=False):
    """
    Retrieves raw data from eDNA history for multiple tags, merging them into
    a single DataFrame, and resampling the data according to the specified
    sampling_rate.
    :param tag_list: a list of fully-qualified (site.service.tag) eDNA tags
    :param start_time: must be in format mm/dd/yy hh:mm:ss
    :param end_time: must be in format mm/dd/yy hh:mm:ss
    :param sampling_rate: in units of seconds
    :param fill_limit: in units of data points
    :param verify_time: verify that the time is not before or after the query
    :param desc_as_label: use the tag description as the column name instead
        of the full tag
    :param utc: if True, use the integer time format instead of DateTime
    :return: a pandas DataFrame with timestamp and values
    """
    # Since we are pulling data from multiple tags, let's iterate over each
    # one. For this case, we only want to pull data using the "raw" method,
    # which will obtain all data as it is actually stored in the historian.
    dfs = []
    columns_names = []
    for tag in tag_list:
        df = GetHist(tag, start_time, end_time, utc=utc)
        if not df.empty:
            # Sometimes a duplicate index/value pair is retrieved from
            # eDNA, which will cause the concat to fail if not removed
            df = df[~df.index.duplicated(keep='first')]
            # If the user wants to use descriptions as labels, we need to
            # ensure that only unique labels are used (2, 3, ... suffixes)
            label = tag
            if desc_as_label:
                orig_label = _GetLabel(tag)
                label = orig_label
                rename_number = 2
                while label in columns_names:
                    label = orig_label + str(rename_number)
                    rename_number += 1
                columns_names.append(label)
            df.rename(columns={tag: label}, inplace=True)
            # Add the DataFrame to the list, to be concatenated later
            dfs.append(pd.DataFrame(df[label]))
    # Next, we concatenate all the DataFrames using an outer join (default).
    # Verify integrity is slow, but it ensures that the concatenation
    # worked correctly.
    if dfs:
        merged_df = pd.concat(dfs, axis=1, verify_integrity=True)
        # MODERNIZED: fillna(method="ffill") is deprecated in pandas 2.x
        # and removed in 3.0; DataFrame.ffill is the supported spelling.
        merged_df = merged_df.ffill(limit=fill_limit)
    else:
        warnings.warn('WARNING- No data retrieved for any tags. ' +
                      'Check eDNA connection, ensure that the start time is ' +
                      'not later than the end time, verify that the ' +
                      'DateTime formatting matches eDNA requirements, and ' +
                      'check that data exists in the query time period.')
        return pd.DataFrame()
    # eDNA sometimes pulls data too early or too far- let's filter out all
    # the data that is not within our original criteria.
    if verify_time:
        start_np = pd.to_datetime(start_time)
        end_np = pd.to_datetime(end_time)
        mask = (merged_df.index > start_np) & (merged_df.index <= end_np)
        merged_df = merged_df.loc[mask]
    # Finally, we resample the data at the rate requested by the user.
    if sampling_rate:
        # NOTE(review): the "S" (seconds) offset alias is deprecated in
        # recent pandas in favor of lowercase "s" -- confirm target version.
        sampling_string = str(sampling_rate) + "S"
        merged_df = merged_df.resample(sampling_string).ffill(limit=fill_limit)
    return merged_df
def _FormatPoints(szPoint, pdValue, szTime, szStatus, szDesc, szUnits):
    # Decode one record returned by the eDNA point-iteration API into a
    # [tag, value, time, status, description, units] row. Non-UTF-8 bytes
    # are ignored during decoding. A blank tag-name buffer means there is
    # no record, so None is returned.
    if not szPoint.value.strip():
        return None
    decoded = [_format_str(buf.value.decode(errors='ignore'))
               for buf in (szPoint, szTime, szStatus, szDesc, szUnits)]
    return [decoded[0], pdValue.value, decoded[1], decoded[2],
            decoded[3], decoded[4]]
def GetPoints(edna_service):
    """
    Obtains all the points in the edna_service, including real-time values.
    :param edna_service: The full Site.Service name of the eDNA service.
    :return: A pandas DataFrame of points in the form [Tag, Value, Time,
        Description, Units]; empty (with a warning) if no points were found
    """
    # Define all required variables in the correct ctypes format.
    # Two buffer sets: one for the first entry, a second reused while
    # iterating the remaining entries.
    szServiceName = c_char_p(edna_service.encode('utf-8'))
    nStarting, pulKey, pdValue = c_ushort(0), c_ulong(0), c_double(-9999)
    szPoint, szTime = create_string_buffer(30), create_string_buffer(30)
    szStatus, szDesc = create_string_buffer(20), create_string_buffer(90)
    szUnits = create_string_buffer(20)
    szPoint2, szTime2 = create_string_buffer(30), create_string_buffer(30)
    szStatus2, szDesc2 = create_string_buffer(20), create_string_buffer(90)
    szUnits2, pdValue2 = create_string_buffer(20), c_double(-9999)
    nPoint, nTime, nStatus = c_ushort(30), c_ushort(30), c_ushort(20)
    nDesc, nUnits = c_ushort(90), c_ushort(20)
    # Call the eDNA function. nRet is zero if the function is successful.
    points = []
    nRet = dna_dll.DnaGetPointEntry(szServiceName, nStarting, byref(pulKey),
        byref(szPoint), nPoint, byref(pdValue), byref(szTime), nTime,
        byref(szStatus), nStatus, byref(szDesc), nDesc, byref(szUnits), nUnits)
    tag = _FormatPoints(szPoint, pdValue, szTime, szStatus, szDesc, szUnits)
    if tag:
        points.append(tag)
    # Iterate across all the returned services (pulKey identifies this
    # iteration session to the dll)
    while nRet == 0:
        nRet = dna_dll.DnaGetNextPointEntry(pulKey, byref(szPoint2), nPoint,
            byref(pdValue2), byref(szTime2), nTime, byref(szStatus2), nStatus,
            byref(szDesc2), nDesc, byref(szUnits2), nUnits)
        # We want to ensure only UTF-8 characters are returned. Ignoring
        # characters is slightly unsafe, but they should only occur in the
        # units or description, so it's not a huge issue.
        tag = _FormatPoints(szPoint2, pdValue2, szTime2, szStatus2, szDesc2,
                            szUnits2)
        if tag:
            points.append(tag)
    # If no results were returned, raise a warning
    df = pd.DataFrame()
    if points:
        df = pd.DataFrame(points, columns=["Tag", "Value", "Time", "Status",
                                           "Description", "Units"])
    else:
        warnings.warn("WARNING- No points were returned. Check that the " +
                      "service exists and contains points.")
    return df
def GetRTFull(tag_name):
    """
    Gets current information about a point configured in a real-time
    eDNA service, including current value, time, status, description,
    and units.
    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :return: tuple of: value, time, status, statusint, description, units;
        or None if the point does not exist or the API call fails
    """
    # Check if the point even exists
    if not DoesIDExist(tag_name):
        warnings.warn("WARNING- " + tag_name + " does not exist or " +
                      "connection was dropped. Try again if tag does exist.")
        return None
    # Define all required variables in the correct ctypes format.
    # Each create_string_buffer(20) is an output buffer the dll writes into;
    # the matching c_ushort(20) values tell the dll those buffer capacities.
    szPoint = c_char_p(tag_name.encode('utf-8'))
    pdValue, ptTime = c_double(-9999), c_long(-9999)
    szValue, szTime = create_string_buffer(20), create_string_buffer(20)
    szStatus, szDesc = create_string_buffer(20), create_string_buffer(20)
    szUnits = create_string_buffer(20)
    nValue, nTime, nStatus = c_ushort(20), c_ushort(20), c_ushort(20)
    # NOTE(review): nDesc and nUnits are passed as 0 even though their
    # buffers are 20 bytes -- verify the dll treats 0 as a valid capacity.
    pusStatus, nDesc, nUnits = c_ushort(0), c_ushort(0), c_ushort(0)
    # Call the eDNA function. nRet is zero if the function is successful
    nRet = dna_dll.DNAGetRTFull(szPoint, byref(pdValue), byref(szValue),
        nValue, byref(ptTime), byref(szTime), nTime, byref(pusStatus),
        byref(szStatus), nStatus, byref(szDesc), nDesc, byref(szUnits), nUnits)
    # Check to make sure the function returned correctly. If not, return None
    if nRet == 0:
        return ([pdValue.value, szTime.value.decode('utf-8'),
                szStatus.value.decode('utf-8'), pusStatus.value,
                szDesc.value.decode('utf-8'), szUnits.value.decode('utf-8')])
    else:
        warnings.warn("WARNING- eDNA API failed with code " + str(nRet))
        return None
def _FormatServices(szSvcName, szSvcDesc, szSvcType, szSvcStat):
    # Returns an array of properly-formatted services from the
    # GetServices function. Non-UTF-8 bytes are ignored during decoding,
    # and the row is only returned when the service name is non-empty
    # (otherwise the function implicitly returns None).
    name = _format_str(szSvcName.value.decode(errors='ignore'))
    desc = _format_str(szSvcDesc.value.decode(errors='ignore'))
    type_ = _format_str(szSvcType.value.decode(errors='ignore'))
    status = _format_str(szSvcStat.value.decode(errors='ignore'))
    if name:
        return [name, desc, type_, status]
def GetServices():
    """
    Obtains all the connected eDNA services.
    :return: A pandas DataFrame of connected eDNA services in the form [Name,
        Description, Type, Status]
    """
    # Define all required variables in the correct ctypes format.
    # Buffer lengths (30/90) must match the n* capacity arguments below.
    pulKey = c_ulong(0)
    szType = c_char_p("".encode('utf-8'))
    szStartSvcName = c_char_p("".encode('utf-8'))
    szSvcName, szSvcDesc = create_string_buffer(30), create_string_buffer(90)
    szSvcType, szSvcStat = create_string_buffer(30), create_string_buffer(30)
    szSvcName2, szSvcDesc2 = create_string_buffer(30), create_string_buffer(90)
    szSvcType2, szSvcStat2 = create_string_buffer(30), create_string_buffer(30)
    nSvcName, nSvcDesc = c_ushort(30), c_ushort(90)
    nSvcType, nSvcStat = c_ushort(30), c_ushort(30)
    # Call the eDNA function. nRet is zero if the function is successful.
    services = []
    nRet = dna_dll.DnaGetServiceEntry(szType, szStartSvcName, byref(pulKey),
        byref(szSvcName), nSvcName, byref(szSvcDesc), nSvcDesc,
        byref(szSvcType), nSvcType, byref(szSvcStat), nSvcStat)
    serv = _FormatServices(szSvcName, szSvcDesc, szSvcType, szSvcStat)
    if serv:
        services.append(serv)
    # Iterate across all the returned services. pulKey identifies this
    # enumeration to the DLL; the loop ends on the first nonzero code.
    while nRet == 0:
        nRet = dna_dll.DnaGetNextServiceEntry(pulKey,
            byref(szSvcName2), nSvcName, byref(szSvcDesc2), nSvcDesc,
            byref(szSvcType2), nSvcType, byref(szSvcStat2), nSvcStat)
        # We want to ensure only UTF-8 characters are returned. Ignoring
        # characters is slightly unsafe, but they should only occur in the
        # units or description, so it's not a huge issue.
        serv = _FormatServices(szSvcName2, szSvcDesc2, szSvcType2, szSvcStat2)
        if serv:
            services.append(serv)
    # If no results were returned, raise a warning
    df = pd.DataFrame()
    if services:
        df = pd.DataFrame(services, columns=["Name", "Description", "Type",
                                             "Status"])
    else:
        warnings.warn("WARNING- No connected eDNA services detected. Check " +
                      "your DNASys.ini file and your network connection.")
    return df
def GetTagDescription(tag_name):
    """
    Gets the current description of a point configured in a real-time eDNA
    service.
    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :return: tag description
    """
    # Bail out early when the tag is unknown to every connected service
    if not DoesIDExist(tag_name):
        warnings.warn("WARNING- " + tag_name + " does not exist or " +
                      "connection was dropped. Try again if tag does exist.")
        return None
    # A fully-qualified tag has at least three dot-separated parts
    parts = tag_name.split(".")
    if len(parts) < 3:
        warnings.warn("WARNING- Please supply the full Site.Service.Tag.")
        return tag_name
    # The first two parts identify the service that owns the point;
    # GetPoints returns every point in that service as a DataFrame
    service_id = parts[0] + "." + parts[1]
    point_df = GetPoints(service_id)
    if tag_name not in point_df.Tag.values:
        warnings.warn("WARNING- " + tag_name + " not found in service.")
        return None
    # Fall back to the tag name itself when the description is blank
    desc = point_df[point_df.Tag == tag_name].Description.values[0]
    return desc if desc else tag_name
def HistAppendValues(site_service, tag_name, times, values, statuses):
    """
    Appends a value to an eDNA history service. Take very careful note of the
    following required parameters. Any deviation from this exact format WILL
    cause the function to fail.
    This function will append values to history, only if they are LATER than
    the current time of the last written data point. If this is not true, no
    data will be appended.
    This value is strongly preferred over HistUpdateInsertValues, which will
    slow down data retrieval if it is used too often.
    :param site_service: This is the history service for the eDNA tag, NOT
        the site.service of the tag itself. For instance,
        ANTARES.HISTORY, not ANTARES.ANVCALC
    :param tag_name: This is the full site.service.tag. For instance,
        ANTARES.ANVCALC.ADE1CA02
    :param times: This is a Python array of times in UTC Epoch format.
        For example, "1483926416" not "2016/01/01 01:01:01".
        This must be an array.
    :param values: A Python array of data point values for each times.
    :param statuses: The status of the point. Refer to eDNA documentation
        for more information. Usually use '3', which is 'OK'.
    """
    # Define all required variables in the correct ctypes format
    szService = c_char_p(site_service.encode('utf-8'))
    szPoint = c_char_p(tag_name.encode('utf-8'))
    # One point is written per DLL call, hence nCount is fixed at 1
    nCount = c_ushort(1)
    # Iterate over each user-supplied data point
    for dttime, value, status in zip(times, values, statuses):
        # Define all required variables in the correct ctypes format
        PtTimeList = c_long(dttime)
        PusStatusList = c_ushort(status)
        PszValueList = c_char_p(str(value).encode('utf-8'))
        szError = create_string_buffer(20)
        nError = c_ushort(20)
        # Call the history append function; any DLL-reported error text
        # lands in szError but is not currently surfaced to the caller
        nRet = dna_dll.DnaHistAppendValues(szService, szPoint,
            nCount, byref(PtTimeList), byref(PusStatusList),
            byref(PszValueList), byref(szError), nError)
def HistUpdateInsertValues(site_service, tag_name, times, values, statuses):
    """
    CAUTION- Use HistAppendValues instead of this function, unless you know
    what you are doing.
    Inserts a value to an eDNA history service. Take very careful note of the
    following required parameters. Any deviation from this exact format WILL
    cause the function to fail.
    :param site_service: This is the history service for the eDNA tag, NOT
        the site.service of the tag itself. For instance,
        ANTARES.HISTORY, not ANTARES.ANVCALC
    :param tag_name: This is the full site.service.tag. For instance,
        ANTARES.ANVCALC.ADE1CA02
    :param times: This is a Python array of times in UTC Epoch format.
        For example, "1483926416" not "2016/01/01 01:01:01".
        This must be an array.
    :param values: A Python array of data point values for each times.
    :param statuses: The status of the point. Refer to eDNA documentation
        for more information. Usually use '3', which is 'OK'.
    """
    # Define all required variables in the correct ctypes format
    szService = c_char_p(site_service.encode('utf-8'))
    szPoint = c_char_p(tag_name.encode('utf-8'))
    # One point is written per DLL call, hence nCount is fixed at 1
    nCount = c_ushort(1)
    # Iterate over each user-supplied data point
    for dttime, value, status in zip(times, values, statuses):
        # Define all required variables in the correct ctypes format
        PtTimeList = c_long(dttime)
        PusStatusList = c_ushort(status)
        PszValueList = c_char_p(str(value).encode('utf-8'))
        szError = create_string_buffer(20)
        nError = c_ushort(20)
        # Call the history update/insert function (previous comment said
        # "append"- a copy-paste leftover from HistAppendValues)
        nRet = dna_dll.DnaHistUpdateInsertValues(szService, szPoint,
            nCount, byref(PtTimeList), byref(PusStatusList),
            byref(PszValueList), byref(szError), nError)
def SelectPoint():
    """
    Opens an eDNA point picker, where the user can select a single tag.
    :return: selected tag name
    """
    # 20-byte output buffer for the chosen tag; the c_ushort tells the
    # DLL the buffer capacity so it will not overrun it
    picked_buffer = create_string_buffer(20)
    buffer_capacity = c_ushort(20)
    dna_dll.DnaSelectPoint(byref(picked_buffer), buffer_capacity)
    return picked_buffer.value.decode('utf-8')
def StringToUTCTime(time_string):
    """
    Turns a DateTime string into UTC time.
    :param time_string: Must be the format "MM/dd/yy hh:mm:ss"
    :return: an integer representing the UTC int format
    """
    # The API expects a C byte string; the DLL result is returned as-is
    encoded = c_char_p(time_string.encode('utf-8'))
    return dna_dll.StringToUTCTime(encoded)
# At the end of the module, we need to check that at least one eDNA service
# is connected. Otherwise, there is a problem with the eDNA connection.
service_array = GetServices()
# NOTE(review): num_services starts as int 0 and is rebound to a str only
# when services exist; when none are connected nothing is printed here
# (GetServices already emits a warning in that case).
num_services = 0
if not service_array.empty:
    num_services = str(len(service_array))
    print("Successfully connected to " + num_services + " eDNA services.")
# Cleanup the unnecessary variables
del(service_array, num_services, default_location)
|
drericstrong/pyedna | pyedna/ezdna.py | GetTagDescription | python | def GetTagDescription(tag_name):
"""
Gets the current description of a point configured in a real-time eDNA
service.
:param tag_name: fully-qualified (site.service.tag) eDNA tag
:return: tag description
"""
# Check if the point even exists
if not DoesIDExist(tag_name):
warnings.warn("WARNING- " + tag_name + " does not exist or " +
"connection was dropped. Try again if tag does exist.")
return None
# To get the point information for the service, we need the Site.Service
split_tag = tag_name.split(".")
# If the full Site.Service.Tag was not supplied, return the tag_name
if len(split_tag) < 3:
warnings.warn("WARNING- Please supply the full Site.Service.Tag.")
return tag_name
# The Site.Service will be the first two split strings
site_service = split_tag[0] + "." + split_tag[1]
# GetPoints will return a DataFrame with point information
points = GetPoints(site_service)
if tag_name in points.Tag.values:
description = points[points.Tag == tag_name].Description.values[0]
if description:
return description
else:
return tag_name
else:
warnings.warn("WARNING- " + tag_name + " not found in service.")
return None | Gets the current description of a point configured in a real-time eDNA
service.
:param tag_name: fully-qualified (site.service.tag) eDNA tag
:return: tag description | train | https://github.com/drericstrong/pyedna/blob/b8f8f52def4f26bb4f3a993ce3400769518385f6/pyedna/ezdna.py#L609-L642 | [
"def DoesIDExist(tag_name):\n \"\"\"\n Determines if a fully-qualified site.service.tag eDNA tag exists\n in any of the connected services.\n\n :param tag_name: fully-qualified (site.service.tag) eDNA tag\n :return: true if the point exists, false if the point does not exist\n\n Example:\n\n >>> DoesIDExist(\"Site.Service.Tag\")\n\n \"\"\"\n # the eDNA API requires that the tag_name be specified in a binary format,\n # and the ctypes library must be used to create a C++ variable type.\n szPoint = c_char_p(tag_name.encode('utf-8'))\n result = bool(dna_dll.DoesIdExist(szPoint))\n return result\n"
] | # -*- coding: utf-8 -*-
"""
pyedna.ezdna
--------------
This module contains "easy" versions of common functions from the eDNA
C++ dll. Obtain a legal copy of the C++ eDNA dll for use.
:copyright: (c) 2017 Eric Strong.
:license: Refer to LICENSE.txt for more information.
"""
# Note- all functions are in CamelCase to match the original eDNA function
# names, even though this does not follow PEP 8.
import re
import os
import numba
import warnings
import numpy as np
import pandas as pd
from unittest.mock import Mock
from ctypes import cdll, byref, create_string_buffer
from ctypes import c_char_p, c_double, c_ushort, c_long, c_ulong
def _mock_edna():
# This function will mock all the methods that were used in the dna_dll.
# It's necessary so that documentation can be automatically created.
dna_dll = Mock()
attrs = {'DnaGetHistAvgUTC.return_value': c_ulong(1),
'DnaGetHistInterpUTC.return_value': c_ulong(1),
'DnaGetHistMinUTC.return_value': c_ulong(1),
'DnaGetHistMaxUTC.return_value': c_ulong(1),
'DnaGetHistSnapUTC.return_value': c_ulong(1),
'DnaGetHistRawUTC.return_value': c_ulong(1),
'DoesIdExist.return_value': c_ulong(1),
'DnaGetHSHistRawUTC.return_value': c_ulong(1),
'DnaGetNextHSHistUTC.return_value': c_ulong(1),
'DnaGetPointEntry.return_value': c_ulong(1),
'DnaGetNextPointEntry.return_value': c_ulong(1),
'DNAGetRTFull.return_value': c_ulong(1),
'DnaSelectPoint.return_value': c_ulong(1),
'StringToUTCTime.return_value': 1,
'DnaGetServiceEntry.return_value': c_ulong(1),
'DnaGetNextServiceEntry.return_value': c_ulong(1),
'DnaHistAppendValues.return_value': c_ulong(1),
'DnaHistUpdateInsertValues.return_value': c_ulong(1),
'DnaCancelHistRequest.return_value': None,
'DnaGetNextHistSmallUTC.return_value': c_ulong(1)}
dna_dll.configure_mock(**attrs)
return dna_dll
# This code should execute at the beginning of the module import, because
# all of the functions in this module require the dna_dll library to be
# loaded. See "LoadDll" if not in default location
default_location = "C:\\Program Files (x86)\\eDNA\\EzDnaApi64.dll"
if os.path.isfile(default_location):
    dna_dll = cdll.LoadLibrary(default_location)
else:
    # Fall back to a Mock of the dll so the module still imports (e.g.
    # for building documentation); every API call will "succeed" with
    # dummy values until LoadDll is called with a real path.
    warnings.warn("ERROR- no eDNA dll detected at " +
                  "C:\\Program Files (x86)\\eDNA\\EzDnaApi64.dll" +
                  " . Please manually load dll using the LoadDll function. " +
                  "Mocking dll, but all functions will fail until " +
                  "dll is manually loaded...")
    dna_dll = _mock_edna()
# If the EzDnaApi file is not in the default location, the user must explicitly
# load it using the LoadDll function.
def LoadDll(location):
    """
    Loads the eDNA API from a user-supplied path. Required whenever
    EzDnaApi64.dll is not at the default install location, since every
    function in this module calls into the loaded dll.
    :param location: the full location of EzDnaApi64.dll, including filename
    """
    # Guard clause- fail fast when the path does not point at a file
    if not os.path.isfile(location):
        raise Exception("ERROR- file does not exist at " + location)
    global dna_dll
    dna_dll = cdll.LoadLibrary(location)
def _format_str(text):
# Only allows a-z, 0-9, ., _, :, /, -, and spaces
if type(text) is str:
formatted_text = re.sub('[^-._:/\sA-Za-z0-9]+', '', text).strip()
return formatted_text
else:
return text
def DoesIDExist(tag_name):
    """
    Determines if a fully-qualified site.service.tag eDNA tag exists
    in any of the connected services.
    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :return: true if the point exists, false if the point does not exist
    Example:
    >>> DoesIDExist("Site.Service.Tag")
    """
    # The eDNA API expects the tag as a C byte string; the dll's integer
    # result is coerced to a Python bool
    encoded_tag = c_char_p(tag_name.encode('utf-8'))
    return bool(dna_dll.DoesIdExist(encoded_tag))
def GetHistAvg(tag_name, start_time, end_time, period,
               desc_as_label=False, label=None):
    """
    Pulls eDNA history for a tag, averaged over each "period" interval.
    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :param start_time: must be in format mm/dd/yy hh:mm:ss
    :param end_time: must be in format mm/dd/yy hh:mm:ss
    :param period: in units of seconds (e.g. 10)
    :param desc_as_label: use the tag description as the column name instead
        of the full tag
    :param label: supply a custom label to use as the DataFrame column name
    :return: a pandas DataFrame with timestamp, value, and status
    """
    # Convenience wrapper- all the real work happens in GetHist
    return GetHist(tag_name, start_time, end_time, period=period, mode="avg",
                   desc_as_label=desc_as_label, label=label)
def GetHistInterp(tag_name, start_time, end_time, period,
                  desc_as_label=False, label=None):
    """
    Pulls eDNA history for a tag, linearly interpolated at each "period"
    interval.
    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :param start_time: must be in format mm/dd/yy hh:mm:ss
    :param end_time: must be in format mm/dd/yy hh:mm:ss
    :param period: in units of seconds (e.g. 10)
    :param desc_as_label: use the tag description as the column name instead
        of the full tag
    :param label: supply a custom label to use as the DataFrame column name
    :return: a pandas DataFrame with timestamp, value, and status
    """
    # Convenience wrapper- all the real work happens in GetHist
    return GetHist(tag_name, start_time, end_time, period=period,
                   mode="interp", desc_as_label=desc_as_label, label=label)
def GetHistMax(tag_name, start_time, end_time, period,
               desc_as_label=False, label=None):
    """
    Pulls eDNA history for a tag, taking the maximum over each "period"
    interval.
    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :param start_time: must be in format mm/dd/yy hh:mm:ss
    :param end_time: must be in format mm/dd/yy hh:mm:ss
    :param period: in units of seconds (e.g. 10)
    :param desc_as_label: use the tag description as the column name instead
        of the full tag
    :param label: supply a custom label to use as the DataFrame column name
    :return: a pandas DataFrame with timestamp, value, and status
    """
    # Convenience wrapper- all the real work happens in GetHist
    return GetHist(tag_name, start_time, end_time, period=period, mode="max",
                   desc_as_label=desc_as_label, label=label)
def GetHistMin(tag_name, start_time, end_time, period,
               desc_as_label=False, label=None):
    """
    Pulls eDNA history for a tag, taking the minimum over each "period"
    interval.
    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :param start_time: must be in format mm/dd/yy hh:mm:ss
    :param end_time: must be in format mm/dd/yy hh:mm:ss
    :param period: in units of seconds (e.g. 10)
    :param desc_as_label: use the tag description as the column name instead
        of the full tag
    :param label: supply a custom label to use as the DataFrame column name
    :return: a pandas DataFrame with timestamp, value, and status
    """
    # Convenience wrapper- all the real work happens in GetHist
    return GetHist(tag_name, start_time, end_time, period=period, mode="min",
                   desc_as_label=desc_as_label, label=label)
def GetHistRaw(tag_name, start_time, end_time, high_speed=False,
               desc_as_label=False, label=None):
    """
    Retrieves raw data from eDNA history for a given tag.
    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :param start_time: must be in format mm/dd/yy hh:mm:ss
    :param end_time: must be in format mm/dd/yy hh:mm:ss
    :param high_speed: true = pull milliseconds
    :param desc_as_label: use the tag description as the column name instead
        of the full tag
    :param label: supply a custom label to use as the DataFrame column name
    :return: a pandas DataFrame with timestamp, value, and status
    """
    # BUG FIX- high_speed was accepted but never forwarded to GetHist, so
    # millisecond pulls silently fell back to ordinary raw (second) pulls.
    return GetHist(tag_name, start_time, end_time, mode="raw",
                   high_speed=high_speed, desc_as_label=desc_as_label,
                   label=label)
def GetHistSnap(tag_name, start_time, end_time, period,
                desc_as_label=False, label=None):
    """
    Pulls eDNA history for a tag, snapped to the last known value at each
    "period" interval.
    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :param start_time: must be in format mm/dd/yy hh:mm:ss
    :param end_time: must be in format mm/dd/yy hh:mm:ss
    :param period: in units of seconds (e.g. 10)
    :param desc_as_label: use the tag description as the column name instead
        of the full tag
    :param label: supply a custom label to use as the DataFrame column name
    :return: a pandas DataFrame with timestamp, value, and status
    """
    # Convenience wrapper- all the real work happens in GetHist
    return GetHist(tag_name, start_time, end_time, period=period, mode="snap",
                   desc_as_label=desc_as_label, label=label)
def GetHist(tag_name, start_time, end_time, period=5, mode="raw",
            desc_as_label=False, label=None, high_speed=False, utc=False):
    """
    Retrieves data from eDNA history for a given tag.
    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :param start_time: must be in format mm/dd/yy hh:mm:ss
    :param end_time: must be in format mm/dd/yy hh:mm:ss
    :param period: specify the number of seconds for the pull interval
    :param mode: "raw", "snap", "avg", "interp", "max", "min"
        See eDNA documentation for more information.
    :param desc_as_label: use the tag description as the column name instead
        of the full tag
    :param label: supply a custom label to use as the DataFrame column name
    :param high_speed: if True, pull millisecond data
    :param utc: if True, use the integer time format instead of DateTime
    :return: a pandas DataFrame with timestamp, value, and status
    """
    # Check if the point even exists
    if not DoesIDExist(tag_name):
        warnings.warn("WARNING- " + tag_name + " does not exist or " +
                      "connection was dropped. Try again if tag does exist.")
        return pd.DataFrame()
    # Define all required variables in the correct ctypes format
    szPoint = c_char_p(tag_name.encode('utf-8'))
    tStart = c_long(StringToUTCTime(start_time))
    tEnd = c_long(StringToUTCTime(end_time))
    tPeriod = c_long(period)
    pulKey = c_ulong(0)
    # Initialize the data pull using the specified pulKey, which is an
    # identifier that tells eDNA which data pull is occurring
    mode = mode.lower().strip()
    if not high_speed:
        # BUG FIX- this used to be a chain of independent "if" statements
        # whose final "else" was attached only to the "snap" test, so any
        # mode other than "snap" ALSO triggered a raw pull that clobbered
        # the requested one. An elif ladder runs exactly one initializer.
        if mode == "avg":
            nRet = dna_dll.DnaGetHistAvgUTC(szPoint, tStart, tEnd, tPeriod,
                                            byref(pulKey))
        elif mode == "interp":
            nRet = dna_dll.DnaGetHistInterpUTC(szPoint, tStart, tEnd, tPeriod,
                                               byref(pulKey))
        elif mode == "min":
            nRet = dna_dll.DnaGetHistMinUTC(szPoint, tStart, tEnd, tPeriod,
                                            byref(pulKey))
        elif mode == "max":
            nRet = dna_dll.DnaGetHistMaxUTC(szPoint, tStart, tEnd, tPeriod,
                                            byref(pulKey))
        elif mode == "snap":
            nRet = dna_dll.DnaGetHistSnapUTC(szPoint, tStart, tEnd, tPeriod,
                                             byref(pulKey))
        else:
            nRet = dna_dll.DnaGetHistRawUTC(szPoint, tStart, tEnd,
                                            byref(pulKey))
        time_, val, stat = _GetNextHistSmallUTC(pulKey, nRet)
    else:
        nStartMillis = c_ushort(0)
        nEndMillis = c_ushort(0)
        nRet = dna_dll.DnaGetHSHistRawUTC(szPoint, tStart, nStartMillis,
                                          tEnd, nEndMillis, byref(pulKey))
        time_, val, stat = _GetNextHSHistUTC(pulKey, nRet)
    # The history request must be cancelled to free up network resources
    dna_dll.DnaCancelHistRequest(pulKey)
    # To construct the pandas DataFrame, the tag name is used as the
    # column name, and the eDNA integer timestamps (seconds, or ms for
    # high-speed pulls) become the index
    d = {tag_name + ' Status': stat, tag_name: val}
    df = pd.DataFrame(data=d, index=time_)
    if not utc:
        df.index = pd.to_datetime(df.index, unit="ms" if high_speed else "s")
    if df.empty:
        warnings.warn('WARNING- No data retrieved for ' + tag_name + '. ' +
                      'Check eDNA connection, ensure that the start time is ' +
                      'not later than the end time, verify that the ' +
                      'DateTime formatting matches eDNA requirements, and ' +
                      'check that data exists in the query time period.')
    # Check if the user would rather use the description as the column name
    if desc_as_label or label:
        new_label = label if label else _GetLabel(tag_name)
        df.rename(inplace=True, columns={tag_name: new_label,
                  tag_name + " Status": new_label + " Status"})
    return df
@numba.jit
def _GetNextHistSmallUTC(pulKey, nRet):
    # Drains a previously-initialized history request (raw, snap, avg,
    # min, max, ...), one point per DLL call, until a nonzero return code
    # signals an error or the end of the data period.
    # NOTE: the incoming nRet is overwritten before use (kept for
    # signature compatibility with callers).
    pdValue, ptTime, pusStatus = c_double(-9999), c_long(-9999), c_ushort(0)
    refVal, refTime, refStat = byref(pdValue), byref(ptTime), byref(pusStatus)
    # PERF FIX- accumulate in Python lists and convert once at the end;
    # np.append reallocates the whole array on every call, which made
    # this loop O(n^2) in the number of returned points.
    vals, times, stats = [], [], []
    nRet = dna_dll.DnaGetNextHistSmallUTC(pulKey, refVal, refTime, refStat)
    while nRet == 0:
        vals.append(pdValue.value)
        times.append(ptTime.value)
        stats.append(pusStatus.value)
        nRet = dna_dll.DnaGetNextHistSmallUTC(pulKey, refVal, refTime, refStat)
    # dtype=float matches the float64 arrays the np.append version built
    return (np.asarray(times, dtype=float), np.asarray(vals, dtype=float),
            np.asarray(stats, dtype=float))
@numba.jit
def _GetNextHSHistUTC(pulKey, nRet):
    # Drains a high-speed (millisecond-resolution) history request, one
    # point per DLL call, until a nonzero return code ends the pull.
    # NOTE: the incoming nRet is overwritten before use (kept for
    # signature compatibility with callers).
    pdValue, ptTime, pnMillis = c_double(-9999), c_long(-9999), c_ushort(0)
    szStatus, nStatus = create_string_buffer(20), c_ushort(20)
    refVal, refTime, refMillis = byref(pdValue), byref(ptTime), byref(pnMillis)
    refStatus = byref(szStatus)
    # PERF FIX- list accumulation instead of quadratic np.append
    vals, times, stats = [], [], []
    nRet = dna_dll.DnaGetNextHSHistUTC(pulKey, refVal, refTime, refMillis,
                                       refStatus, nStatus)
    while nRet == 0:
        vals.append(pdValue.value)
        # Combine the seconds and millisecond fields into one ms timestamp
        times.append(ptTime.value * 1000 + pnMillis.value)
        # The string status buffer is ignored; status 3 ("OK") is assumed
        # for every point, matching the original behavior
        stats.append(3)
        nRet = dna_dll.DnaGetNextHSHistUTC(pulKey, refVal, refTime, refMillis,
                                           refStatus, nStatus)
    # dtype=float matches the float64 arrays the np.append version built
    return (np.asarray(times, dtype=float), np.asarray(vals, dtype=float),
            np.asarray(stats, dtype=float))
def _GetLabel(tag_name):
    # Prefer the tag's description as the DataFrame column label; when the
    # description is blank or None, fall back to the tag name itself.
    description = GetTagDescription(tag_name)
    return description if description else tag_name
def GetMultipleTags(tag_list, start_time, end_time, sampling_rate=None,
                    fill_limit=99999, verify_time=False, desc_as_label=False,
                    utc=False):
    """
    Retrieves raw data from eDNA history for multiple tags, merging them into
    a single DataFrame, and resampling the data according to the specified
    sampling_rate.
    :param tag_list: a list of fully-qualified (site.service.tag) eDNA tags
    :param start_time: must be in format mm/dd/yy hh:mm:ss
    :param end_time: must be in format mm/dd/yy hh:mm:ss
    :param sampling_rate: in units of seconds
    :param fill_limit: in units of data points
    :param verify_time: verify that the time is not before or after the query
    :param desc_as_label: use the tag description as the column name instead
        of the full tag
    :param utc: if True, use the integer time format instead of DateTime
    :return: a pandas DataFrame with timestamp and values
    """
    # Since we are pulling data from multiple tags, let's iterate over each
    # one. For this case, we only want to pull data using the "raw" method,
    # which will obtain all data as it is actually stored in the historian.
    dfs = []
    columns_names = []
    for tag in tag_list:
        df = GetHist(tag, start_time, end_time, utc=utc)
        if not df.empty:
            # Sometimes a duplicate index/value pair is retrieved from
            # eDNA, which will cause the concat to fail if not removed
            # df.drop_duplicates(inplace=True)
            df = df[~df.index.duplicated(keep='first')]
            # If the user wants to use descriptions as labels, we need to
            # ensure that only unique labels are used
            label = tag
            if desc_as_label:
                orig_label = _GetLabel(tag)
                label = orig_label
                # Append 2, 3, ... until the description is unique
                rename_number = 2
                while label in columns_names:
                    label = orig_label + str(rename_number)
                    rename_number += 1
            columns_names.append(label)
            df.rename(columns={tag: label}, inplace=True)
            # Add the DataFrame to the list, to be concatenated later
            dfs.append(pd.DataFrame(df[label]))
    # Next, we concatenate all the DataFrames using an outer join (default).
    # Verify integrity is slow, but it ensures that the concatenation
    # worked correctly.
    if dfs:
        merged_df = pd.concat(dfs, axis=1, verify_integrity=True)
        # NOTE(review): fillna(method=...) is deprecated in recent pandas;
        # consider merged_df.ffill(limit=fill_limit) when upgrading.
        merged_df = merged_df.fillna(method="ffill", limit=fill_limit)
    else:
        warnings.warn('WARNING- No data retrieved for any tags. ' +
                      'Check eDNA connection, ensure that the start time is ' +
                      'not later than the end time, verify that the ' +
                      'DateTime formatting matches eDNA requirements, and ' +
                      'check that data exists in the query time period.')
        return pd.DataFrame()
    # eDNA sometimes pulls data too early or too far- let's filter out all
    # the data that is not within our original criteria.
    if verify_time:
        start_np = pd.to_datetime(start_time)
        end_np = pd.to_datetime(end_time)
        mask = (merged_df.index > start_np) & (merged_df.index <= end_np)
        merged_df = merged_df.loc[mask]
    # Finally, we resample the data at the rate requested by the user.
    if sampling_rate:
        sampling_string = str(sampling_rate) + "S"
        merged_df = merged_df.resample(sampling_string).fillna(
            method="ffill", limit=fill_limit)
    return merged_df
def _FormatPoints(szPoint, pdValue, szTime, szStatus, szDesc, szUnits):
    # Decode one real-time point record into [tag, value, time, status,
    # description, units] for GetPoints; a blank tag yields None.
    if not szPoint.value.strip():
        return None
    return [_format_str(szPoint.value.decode(errors='ignore')),
            pdValue.value,
            _format_str(szTime.value.decode(errors='ignore')),
            _format_str(szStatus.value.decode(errors='ignore')),
            _format_str(szDesc.value.decode(errors='ignore')),
            _format_str(szUnits.value.decode(errors='ignore'))]
def GetPoints(edna_service):
    """
    Obtains all the points in the edna_service, including real-time values.
    :param edna_service: The full Site.Service name of the eDNA service.
    :return: A pandas DataFrame of points in the form [Tag, Value, Time,
        Description, Units]
    """
    # Define all required variables in the correct ctypes format.
    # Buffer sizes (30/20/90) must match the n* capacity arguments below.
    szServiceName = c_char_p(edna_service.encode('utf-8'))
    nStarting, pulKey, pdValue = c_ushort(0), c_ulong(0), c_double(-9999)
    szPoint, szTime = create_string_buffer(30), create_string_buffer(30)
    szStatus, szDesc = create_string_buffer(20), create_string_buffer(90)
    szUnits = create_string_buffer(20)
    szPoint2, szTime2 = create_string_buffer(30), create_string_buffer(30)
    szStatus2, szDesc2 = create_string_buffer(20), create_string_buffer(90)
    szUnits2, pdValue2 = create_string_buffer(20), c_double(-9999)
    nPoint, nTime, nStatus = c_ushort(30), c_ushort(30), c_ushort(20)
    nDesc, nUnits = c_ushort(90), c_ushort(20)
    # Call the eDNA function. nRet is zero if the function is successful.
    points = []
    nRet = dna_dll.DnaGetPointEntry(szServiceName, nStarting, byref(pulKey),
        byref(szPoint), nPoint, byref(pdValue), byref(szTime), nTime,
        byref(szStatus), nStatus, byref(szDesc), nDesc, byref(szUnits), nUnits)
    tag = _FormatPoints(szPoint, pdValue, szTime, szStatus, szDesc, szUnits)
    if tag:
        points.append(tag)
    # Iterate across all the returned points. pulKey identifies this
    # enumeration to the DLL; the loop stops on the first nonzero code.
    while nRet == 0:
        nRet = dna_dll.DnaGetNextPointEntry(pulKey, byref(szPoint2), nPoint,
            byref(pdValue2), byref(szTime2), nTime, byref(szStatus2), nStatus,
            byref(szDesc2), nDesc, byref(szUnits2), nUnits)
        # We want to ensure only UTF-8 characters are returned. Ignoring
        # characters is slightly unsafe, but they should only occur in the
        # units or description, so it's not a huge issue.
        tag = _FormatPoints(szPoint2, pdValue2, szTime2, szStatus2, szDesc2,
                            szUnits2)
        if tag:
            points.append(tag)
    # If no results were returned, raise a warning
    df = pd.DataFrame()
    if points:
        df = pd.DataFrame(points, columns=["Tag", "Value", "Time", "Status",
                                           "Description", "Units"])
    else:
        warnings.warn("WARNING- No points were returned. Check that the " +
                      "service exists and contains points.")
    return df
def GetRTFull(tag_name):
    """
    Gets current information about a point configured in a real-time
    eDNA service, including current value, time, status, description,
    and units.
    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :return: list of [value, time, status, statusint, description, units],
        or None if the tag does not exist or the API call fails
    """
    # Check if the point even exists
    if not DoesIDExist(tag_name):
        warnings.warn("WARNING- " + tag_name + " does not exist or " +
                      "connection was dropped. Try again if tag does exist.")
        return None
    # Define all required variables in the correct ctypes format.
    # The 20-byte output buffers must stay in sync with the c_ushort(20)
    # capacity arguments passed to the DLL below.
    szPoint = c_char_p(tag_name.encode('utf-8'))
    pdValue, ptTime = c_double(-9999), c_long(-9999)
    szValue, szTime = create_string_buffer(20), create_string_buffer(20)
    szStatus, szDesc = create_string_buffer(20), create_string_buffer(20)
    szUnits = create_string_buffer(20)
    nValue, nTime, nStatus = c_ushort(20), c_ushort(20), c_ushort(20)
    pusStatus, nDesc, nUnits = c_ushort(0), c_ushort(0), c_ushort(0)
    # Call the eDNA function. nRet is zero if the function is successful
    nRet = dna_dll.DNAGetRTFull(szPoint, byref(pdValue), byref(szValue),
        nValue, byref(ptTime), byref(szTime), nTime, byref(pusStatus),
        byref(szStatus), nStatus, byref(szDesc), nDesc, byref(szUnits), nUnits)
    # Check to make sure the function returned correctly. If not, return None
    if nRet == 0:
        return ([pdValue.value, szTime.value.decode('utf-8'),
                szStatus.value.decode('utf-8'), pusStatus.value,
                szDesc.value.decode('utf-8'), szUnits.value.decode('utf-8')])
    else:
        warnings.warn("WARNING- eDNA API failed with code " + str(nRet))
        return None
def _FormatServices(szSvcName, szSvcDesc, szSvcType, szSvcStat):
    # Decode each raw ctypes buffer and strip disallowed characters,
    # producing a [name, description, type, status] row for GetServices.
    # An entry with a blank name is treated as empty (implicitly None).
    fields = [_format_str(buf.value.decode(errors='ignore'))
              for buf in (szSvcName, szSvcDesc, szSvcType, szSvcStat)]
    if fields[0]:
        return fields
def GetServices():
    """
    Obtains all the connected eDNA services.

    :return: A pandas DataFrame of connected eDNA services in the form [Name,
        Description, Type, Status]
    """
    # Define all required variables in the correct ctypes format.
    # The empty szType/szStartSvcName act as wildcards so every service is
    # enumerated; the "2"-suffixed buffers are reused on each iteration call.
    pulKey = c_ulong(0)
    szType = c_char_p("".encode('utf-8'))
    szStartSvcName = c_char_p("".encode('utf-8'))
    szSvcName, szSvcDesc = create_string_buffer(30), create_string_buffer(90)
    szSvcType, szSvcStat = create_string_buffer(30), create_string_buffer(30)
    szSvcName2, szSvcDesc2 = create_string_buffer(30), create_string_buffer(90)
    szSvcType2, szSvcStat2 = create_string_buffer(30), create_string_buffer(30)
    nSvcName, nSvcDesc = c_ushort(30), c_ushort(90)
    nSvcType, nSvcStat = c_ushort(30), c_ushort(30)
    # Call the eDNA function. nRet is zero if the function is successful.
    services = []
    nRet = dna_dll.DnaGetServiceEntry(szType, szStartSvcName, byref(pulKey),
        byref(szSvcName), nSvcName, byref(szSvcDesc), nSvcDesc,
        byref(szSvcType), nSvcType, byref(szSvcStat), nSvcStat)
    serv = _FormatServices(szSvcName, szSvcDesc, szSvcType, szSvcStat)
    if serv:
        services.append(serv)
    # Iterate across all the returned services
    while nRet == 0:
        nRet = dna_dll.DnaGetNextServiceEntry(pulKey,
            byref(szSvcName2), nSvcName, byref(szSvcDesc2), nSvcDesc,
            byref(szSvcType2), nSvcType, byref(szSvcStat2), nSvcStat)
        # We want to ensure only UTF-8 characters are returned. Ignoring
        # characters is slightly unsafe, but they should only occur in the
        # units or description, so it's not a huge issue.
        serv = _FormatServices(szSvcName2, szSvcDesc2, szSvcType2, szSvcStat2)
        if serv:
            services.append(serv)
    # If no results were returned, raise a warning
    df = pd.DataFrame()
    if services:
        df = pd.DataFrame(services, columns=["Name", "Description", "Type",
                                             "Status"])
    else:
        warnings.warn("WARNING- No connected eDNA services detected. Check " +
                      "your DNASys.ini file and your network connection.")
    return df
def GetTagDescription(tag_name):
    """
    Gets the current description of a point configured in a real-time eDNA
    service.

    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :return: the tag description; the tag_name itself if the description is
        blank or the tag is not fully qualified; None if the point does not
        exist or is not found in the service
    """
    # Check if the point even exists
    if not DoesIDExist(tag_name):
        warnings.warn("WARNING- " + tag_name + " does not exist or " +
                      "connection was dropped. Try again if tag does exist.")
        return None
    # To get the point information for the service, we need the Site.Service
    split_tag = tag_name.split(".")
    # If the full Site.Service.Tag was not supplied, return the tag_name
    if len(split_tag) < 3:
        warnings.warn("WARNING- Please supply the full Site.Service.Tag.")
        return tag_name
    # The Site.Service will be the first two split strings
    site_service = split_tag[0] + "." + split_tag[1]
    # GetPoints will return a DataFrame with point information
    points = GetPoints(site_service)
    if tag_name in points.Tag.values:
        description = points[points.Tag == tag_name].Description.values[0]
        # An empty/blank description falls back to the tag name itself
        if description:
            return description
        else:
            return tag_name
    else:
        warnings.warn("WARNING- " + tag_name + " not found in service.")
        return None
def HistAppendValues(site_service, tag_name, times, values, statuses):
    """
    Appends a value to an eDNA history service. Take very careful note of the
    following required parameters. Any deviation from this exact format WILL
    cause the function to fail.

    This function will append values to history, only if they are LATER than
    the current time of the last written data point. If this is not true, no
    data will be appended.

    This value is strongly preferred over HistUpdateInsertValues, which will
    slow down data retrieval if it is used too often.

    :param site_service: This is the history service for the eDNA tag, NOT
        the site.service of the tag itself. For instance,
        ANTARES.HISTORY, not ANTARES.ANVCALC
    :param tag_name: This is the full site.service.tag. For instance,
        ANTARES.ANVCALC.ADE1CA02
    :param times: This is a Python array of times in UTC Epoch format.
        For example, "1483926416" not "2016/01/01 01:01:01".
        This must be an array.
    :param values: A Python array of data point values for each times.
    :param statuses: The status of the point. Refer to eDNA documentation
        for more information. Usually use '3', which is 'OK'.
    """
    # Define all required variables in the correct ctypes format
    szService = c_char_p(site_service.encode('utf-8'))
    szPoint = c_char_p(tag_name.encode('utf-8'))
    # Points are written one at a time, hence nCount of 1 per call
    nCount = c_ushort(1)
    # Iterate over each user-supplied data point
    for dttime, value, status in zip(times, values, statuses):
        # Define all required variables in the correct ctypes format
        PtTimeList = c_long(dttime)
        PusStatusList = c_ushort(status)
        PszValueList = c_char_p(str(value).encode('utf-8'))
        szError = create_string_buffer(20)
        nError = c_ushort(20)
        # Call the history append file.
        # NOTE(review): nRet and szError are populated by the API but never
        # inspected, so write failures are silently dropped -- confirm
        # whether that is intentional.
        nRet = dna_dll.DnaHistAppendValues(szService, szPoint,
            nCount, byref(PtTimeList), byref(PusStatusList),
            byref(PszValueList), byref(szError), nError)
def HistUpdateInsertValues(site_service, tag_name, times, values, statuses):
    """
    CAUTION- Use HistAppendValues instead of this function, unless you know
    what you are doing.

    Inserts a value to an eDNA history service. Take very careful note of the
    following required parameters. Any deviation from this exact format WILL
    cause the function to fail.

    :param site_service: This is the history service for the eDNA tag, NOT
        the site.service of the tag itself. For instance,
        ANTARES.HISTORY, not ANTARES.ANVCALC
    :param tag_name: This is the full site.service.tag. For instance,
        ANTARES.ANVCALC.ADE1CA02
    :param times: This is a Python array of times in UTC Epoch format.
        For example, "1483926416" not "2016/01/01 01:01:01".
        This must be an array.
    :param values: A Python array of data point values for each times.
    :param statuses: The status of the point. Refer to eDNA documentation
        for more information. Usually use '3', which is 'OK'.
    """
    # Define all required variables in the correct ctypes format
    szService = c_char_p(site_service.encode('utf-8'))
    szPoint = c_char_p(tag_name.encode('utf-8'))
    # Points are written one at a time, hence nCount of 1 per call
    nCount = c_ushort(1)
    # Iterate over each user-supplied data point
    for dttime, value, status in zip(times, values, statuses):
        # Define all required variables in the correct ctypes format
        PtTimeList = c_long(dttime)
        PusStatusList = c_ushort(status)
        PszValueList = c_char_p(str(value).encode('utf-8'))
        szError = create_string_buffer(20)
        nError = c_ushort(20)
        # Call the history append file.
        # NOTE(review): nRet and szError are populated by the API but never
        # inspected -- errors are silently dropped; confirm intent.
        nRet = dna_dll.DnaHistUpdateInsertValues(szService, szPoint,
            nCount, byref(PtTimeList), byref(PusStatusList),
            byref(PszValueList), byref(szError), nError)
def SelectPoint():
    """
    Opens an eDNA point picker, where the user can select a single tag.

    :return: selected tag name
    """
    # Allocate a 20-byte buffer that the picker dialog fills in
    buf = create_string_buffer(20)
    buf_size = c_ushort(20)
    # Launch the picker; the chosen tag is written into buf
    dna_dll.DnaSelectPoint(byref(buf), buf_size)
    return buf.value.decode('utf-8')
def StringToUTCTime(time_string):
    """
    Turns a DateTime string into UTC time.

    :param time_string: Must be the format "MM/dd/yy hh:mm:ss"
    :return: an integer representing the UTC int format
    """
    # The eDNA API expects a C-style byte string rather than a Python str
    encoded = c_char_p(time_string.encode('utf-8'))
    return dna_dll.StringToUTCTime(encoded)
# At the end of the module, we need to check that at least one eDNA service
# is connected. Otherwise, there is a problem with the eDNA connection.
service_array = GetServices()
# num_services starts as int 0 and is rebound to a str only on success,
# purely for formatting the message below
num_services = 0
if not service_array.empty:
    num_services = str(len(service_array))
    print("Successfully connected to " + num_services + " eDNA services.")
# Cleanup the unnecessary variables
del(service_array, num_services, default_location)
|
drericstrong/pyedna | pyedna/ezdna.py | HistAppendValues | python | def HistAppendValues(site_service, tag_name, times, values, statuses):
"""
Appends a value to an eDNA history service. Take very careful note of the
following required parameters. Any deviation from this exact format WILL
cause the function to fail.
This function will append values to history, only if they are LATER than
the current time of the last written data point. If this is not true, no
data will be appended.
This value is strongly preferred over HistUpdateInsertValues, which will
slow down data retrieval if it is used too often.
:param site_service: This is the history service for the eDNA tag, NOT
the site.service of the tag itself. For instance,
ANTARES.HISTORY, not ANTARES.ANVCALC
:param tag_name: This is the full site.service.tag. For instance,
ANTARES.ANVCALC.ADE1CA02
:param times: This is a Python array of times in UTC Epoch format.
For example, "1483926416" not "2016/01/01 01:01:01".
This must be an array.
:param values: A Python array of data point values for each times.
:param statuses: The status of the point. Refer to eDNA documentation
for more information. Usually use '3', which is 'OK'.
"""
# Define all required variables in the correct ctypes format
szService = c_char_p(site_service.encode('utf-8'))
szPoint = c_char_p(tag_name.encode('utf-8'))
nCount = c_ushort(1)
# Iterate over each user-supplied data point
for dttime, value, status in zip(times, values, statuses):
# Define all required variables in the correct ctypes format
PtTimeList = c_long(dttime)
PusStatusList = c_ushort(status)
PszValueList = c_char_p(str(value).encode('utf-8'))
szError = create_string_buffer(20)
nError = c_ushort(20)
# Call the history append file
nRet = dna_dll.DnaHistAppendValues(szService, szPoint,
nCount, byref(PtTimeList), byref(PusStatusList),
byref(PszValueList), byref(szError), nError) | Appends a value to an eDNA history service. Take very careful note of the
following required parameters. Any deviation from this exact format WILL
cause the function to fail.
This function will append values to history, only if they are LATER than
the current time of the last written data point. If this is not true, no
data will be appended.
This value is strongly preferred over HistUpdateInsertValues, which will
slow down data retrieval if it is used too often.
:param site_service: This is the history service for the eDNA tag, NOT
the site.service of the tag itself. For instance,
ANTARES.HISTORY, not ANTARES.ANVCALC
:param tag_name: This is the full site.service.tag. For instance,
ANTARES.ANVCALC.ADE1CA02
:param times: This is a Python array of times in UTC Epoch format.
For example, "1483926416" not "2016/01/01 01:01:01".
This must be an array.
:param values: A Python array of data point values for each times.
:param statuses: The status of the point. Refer to eDNA documentation
for more information. Usually use '3', which is 'OK'. | train | https://github.com/drericstrong/pyedna/blob/b8f8f52def4f26bb4f3a993ce3400769518385f6/pyedna/ezdna.py#L645-L686 | null | # -*- coding: utf-8 -*-
"""
pyedna.ezdna
--------------
This module contains "easy" versions of common functions from the eDNA
C++ dll. Obtain a legal copy of the C++ eDNA dll for use.
:copyright: (c) 2017 Eric Strong.
:license: Refer to LICENSE.txt for more information.
"""
# Note- all functions are in CamelCase to match the original eDNA function
# names, even though this does not follow PEP 8.
import re
import os
import numba
import warnings
import numpy as np
import pandas as pd
from unittest.mock import Mock
from ctypes import cdll, byref, create_string_buffer
from ctypes import c_char_p, c_double, c_ushort, c_long, c_ulong
def _mock_edna():
# This function will mock all the methods that were used in the dna_dll.
# It's necessary so that documentation can be automatically created.
dna_dll = Mock()
attrs = {'DnaGetHistAvgUTC.return_value': c_ulong(1),
'DnaGetHistInterpUTC.return_value': c_ulong(1),
'DnaGetHistMinUTC.return_value': c_ulong(1),
'DnaGetHistMaxUTC.return_value': c_ulong(1),
'DnaGetHistSnapUTC.return_value': c_ulong(1),
'DnaGetHistRawUTC.return_value': c_ulong(1),
'DoesIdExist.return_value': c_ulong(1),
'DnaGetHSHistRawUTC.return_value': c_ulong(1),
'DnaGetNextHSHistUTC.return_value': c_ulong(1),
'DnaGetPointEntry.return_value': c_ulong(1),
'DnaGetNextPointEntry.return_value': c_ulong(1),
'DNAGetRTFull.return_value': c_ulong(1),
'DnaSelectPoint.return_value': c_ulong(1),
'StringToUTCTime.return_value': 1,
'DnaGetServiceEntry.return_value': c_ulong(1),
'DnaGetNextServiceEntry.return_value': c_ulong(1),
'DnaHistAppendValues.return_value': c_ulong(1),
'DnaHistUpdateInsertValues.return_value': c_ulong(1),
'DnaCancelHistRequest.return_value': None,
'DnaGetNextHistSmallUTC.return_value': c_ulong(1)}
dna_dll.configure_mock(**attrs)
return dna_dll
# This code should execute at the beginning of the module import, because
# all of the functions in this module require the dna_dll library to be
# loaded. See "LoadDll" if not in default location
default_location = "C:\\Program Files (x86)\\eDNA\\EzDnaApi64.dll"
if os.path.isfile(default_location):
    dna_dll = cdll.LoadLibrary(default_location)
else:
    # Fall back to a Mock so the module remains importable (e.g. for
    # documentation builds); every API call fails until LoadDll is used
    warnings.warn("ERROR- no eDNA dll detected at " +
                  "C:\\Program Files (x86)\\eDNA\\EzDnaApi64.dll" +
                  " . Please manually load dll using the LoadDll function. " +
                  "Mocking dll, but all functions will fail until " +
                  "dll is manually loaded...")
    dna_dll = _mock_edna()
# If the EzDnaApi file is not in the default location, the user must explicitly
# load it using the LoadDll function.
def LoadDll(location):
    """
    If the EzDnaApi64.dll file is not in the default location
    (C:\Program Files (x86)\eDNA\EzDnaApi64.dll) then the user must specify
    the correct location of the file, before this module can be used.

    :param location: the full location of EzDnaApi64.dll, including filename
    """
    # Guard clause: refuse to load a path that does not exist
    if not os.path.isfile(location):
        raise Exception("ERROR- file does not exist at " + location)
    global dna_dll
    dna_dll = cdll.LoadLibrary(location)
def _format_str(text):
# Only allows a-z, 0-9, ., _, :, /, -, and spaces
if type(text) is str:
formatted_text = re.sub('[^-._:/\sA-Za-z0-9]+', '', text).strip()
return formatted_text
else:
return text
def DoesIDExist(tag_name):
    """
    Determines if a fully-qualified site.service.tag eDNA tag exists
    in any of the connected services.

    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :return: true if the point exists, false if the point does not exist

    Example:

    >>> DoesIDExist("Site.Service.Tag")
    """
    # The eDNA API requires a C-style byte string, so encode the Python
    # str before handing it to the dll
    encoded_tag = c_char_p(tag_name.encode('utf-8'))
    return bool(dna_dll.DoesIdExist(encoded_tag))
def GetHistAvg(tag_name, start_time, end_time, period,
               desc_as_label=False, label=None):
    """
    Retrieves data from eDNA history for a given tag, averaged over the
    specified "period".

    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :param start_time: must be in format mm/dd/yy hh:mm:ss
    :param end_time: must be in format mm/dd/yy hh:mm:ss
    :param period: in units of seconds (e.g. 10)
    :param desc_as_label: use the tag description as the column name instead
        of the full tag
    :param label: supply a custom label to use as the DataFrame column name
    :return: a pandas DataFrame with timestamp, value, and status
    """
    # Thin wrapper: delegate to GetHist with the averaging retrieval mode
    return GetHist(tag_name, start_time, end_time, period=period,
                   mode="avg", desc_as_label=desc_as_label, label=label)
def GetHistInterp(tag_name, start_time, end_time, period,
                  desc_as_label=False, label=None):
    """
    Retrieves data from eDNA history for a given tag, linearly interpolated
    over the specified "period".

    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :param start_time: must be in format mm/dd/yy hh:mm:ss
    :param end_time: must be in format mm/dd/yy hh:mm:ss
    :param period: in units of seconds (e.g. 10)
    :param desc_as_label: use the tag description as the column name instead
        of the full tag
    :param label: supply a custom label to use as the DataFrame column name
    :return: a pandas DataFrame with timestamp, value, and status
    """
    # Thin wrapper: delegate to GetHist with the interpolating mode
    return GetHist(tag_name, start_time, end_time, period=period,
                   mode="interp", desc_as_label=desc_as_label, label=label)
def GetHistMax(tag_name, start_time, end_time, period,
               desc_as_label=False, label=None):
    """
    Retrieves data from eDNA history for a given tag, taking the maximum of
    the data over the specified "period".

    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :param start_time: must be in format mm/dd/yy hh:mm:ss
    :param end_time: must be in format mm/dd/yy hh:mm:ss
    :param period: in units of seconds (e.g. 10)
    :param desc_as_label: use the tag description as the column name instead
        of the full tag
    :param label: supply a custom label to use as the DataFrame column name
    :return: a pandas DataFrame with timestamp, value, and status
    """
    # Thin wrapper: delegate to GetHist with the maximum retrieval mode
    return GetHist(tag_name, start_time, end_time, period=period,
                   mode="max", desc_as_label=desc_as_label, label=label)
def GetHistMin(tag_name, start_time, end_time, period,
               desc_as_label=False, label=None):
    """
    Retrieves data from eDNA history for a given tag, taking the minimum of
    the data over the specified "period".

    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :param start_time: must be in format mm/dd/yy hh:mm:ss
    :param end_time: must be in format mm/dd/yy hh:mm:ss
    :param period: in units of seconds (e.g. 10)
    :param desc_as_label: use the tag description as the column name instead
        of the full tag
    :param label: supply a custom label to use as the DataFrame column name
    :return: a pandas DataFrame with timestamp, value, and status
    """
    # Thin wrapper: delegate to GetHist with the minimum retrieval mode
    return GetHist(tag_name, start_time, end_time, period=period,
                   mode="min", desc_as_label=desc_as_label, label=label)
def GetHistRaw(tag_name, start_time, end_time, high_speed=False,
               desc_as_label=False, label=None):
    """
    Retrieves raw data from eDNA history for a given tag.

    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :param start_time: must be in format mm/dd/yy hh:mm:ss
    :param end_time: must be in format mm/dd/yy hh:mm:ss
    :param high_speed: true = pull milliseconds
    :param desc_as_label: use the tag description as the column name instead
        of the full tag
    :param label: supply a custom label to use as the DataFrame column name
    :return: a pandas DataFrame with timestamp, value, and status
    """
    # BUG FIX: high_speed was previously accepted but never forwarded to
    # GetHist, so millisecond pulls silently fell back to one-second data.
    return GetHist(tag_name, start_time, end_time, mode="raw",
                   high_speed=high_speed, desc_as_label=desc_as_label,
                   label=label)
def GetHistSnap(tag_name, start_time, end_time, period,
                desc_as_label=False, label=None):
    """
    Retrieves data from eDNA history for a given tag, snapped to the last
    known value over intervals of the specified "period".

    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :param start_time: must be in format mm/dd/yy hh:mm:ss
    :param end_time: must be in format mm/dd/yy hh:mm:ss
    :param period: in units of seconds (e.g. 10)
    :param desc_as_label: use the tag description as the column name instead
        of the full tag
    :param label: supply a custom label to use as the DataFrame column name
    :return: a pandas DataFrame with timestamp, value, and status
    """
    # Thin wrapper: delegate to GetHist with the snapshot retrieval mode
    return GetHist(tag_name, start_time, end_time, period=period,
                   mode="snap", desc_as_label=desc_as_label, label=label)
def GetHist(tag_name, start_time, end_time, period=5, mode="raw",
            desc_as_label=False, label=None, high_speed=False, utc=False):
    """
    Retrieves data from eDNA history for a given tag.

    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :param start_time: must be in format mm/dd/yy hh:mm:ss
    :param end_time: must be in format mm/dd/yy hh:mm:ss
    :param period: specify the number of seconds for the pull interval
    :param mode: "raw", "snap", "avg", "interp", "max", "min"
        See eDNA documentation for more information. Unknown modes fall
        back to "raw".
    :param desc_as_label: use the tag description as the column name instead
        of the full tag
    :param label: supply a custom label to use as the DataFrame column name
    :param high_speed: if True, pull millisecond data
    :param utc: if True, use the integer time format instead of DateTime
    :return: a pandas DataFrame with timestamp, value, and status
    """
    # Check if the point even exists
    if not DoesIDExist(tag_name):
        warnings.warn("WARNING- " + tag_name + " does not exist or " +
                      "connection was dropped. Try again if tag does exist.")
        return pd.DataFrame()
    # Define all required variables in the correct ctypes format
    szPoint = c_char_p(tag_name.encode('utf-8'))
    tStart = c_long(StringToUTCTime(start_time))
    tEnd = c_long(StringToUTCTime(end_time))
    tPeriod = c_long(period)
    pulKey = c_ulong(0)
    # Initialize the data pull using the specified pulKey, which is an
    # identifier that tells eDNA which data pull is occurring
    mode = mode.lower().strip()
    if not high_speed:
        # BUG FIX: these branches were independent "if" statements, so the
        # trailing raw pull also executed for avg/interp/min/max modes and
        # overwrote pulKey, silently discarding the requested mode. An
        # elif chain ensures exactly one pull is initialized.
        if mode == "avg":
            nRet = dna_dll.DnaGetHistAvgUTC(szPoint, tStart, tEnd, tPeriod, byref(pulKey))
        elif mode == "interp":
            nRet = dna_dll.DnaGetHistInterpUTC(szPoint, tStart, tEnd, tPeriod, byref(pulKey))
        elif mode == "min":
            nRet = dna_dll.DnaGetHistMinUTC(szPoint, tStart, tEnd, tPeriod, byref(pulKey))
        elif mode == "max":
            nRet = dna_dll.DnaGetHistMaxUTC(szPoint, tStart, tEnd, tPeriod, byref(pulKey))
        elif mode == "snap":
            nRet = dna_dll.DnaGetHistSnapUTC(szPoint, tStart, tEnd, tPeriod, byref(pulKey))
        else:
            nRet = dna_dll.DnaGetHistRawUTC(szPoint, tStart, tEnd, byref(pulKey))
        time_, val, stat = _GetNextHistSmallUTC(pulKey, nRet)
    else:
        nStartMillis = c_ushort(0)
        nEndMillis = c_ushort(0)
        nRet = dna_dll.DnaGetHSHistRawUTC(szPoint, tStart, nStartMillis,
                                          tEnd, nEndMillis, byref(pulKey))
        time_, val, stat = _GetNextHSHistUTC(pulKey, nRet)
    # The history request must be cancelled to free up network resources
    dna_dll.DnaCancelHistRequest(pulKey)
    # To construct the pandas DataFrame, the tag name will be used as the
    # column name, and the index (which is in the strange eDNA format) must be
    # converted to an actual DateTime
    d = {tag_name + ' Status': stat, tag_name: val}
    df = pd.DataFrame(data=d, index=time_)
    if not utc:
        # Epoch seconds (or milliseconds, for high-speed pulls) are turned
        # into a proper DatetimeIndex
        if not high_speed:
            df.index = pd.to_datetime(df.index, unit="s")
        else:
            df.index = pd.to_datetime(df.index, unit="ms")
    if df.empty:
        warnings.warn('WARNING- No data retrieved for ' + tag_name + '. ' +
                      'Check eDNA connection, ensure that the start time is ' +
                      'not later than the end time, verify that the ' +
                      'DateTime formatting matches eDNA requirements, and ' +
                      'check that data exists in the query time period.')
    # Check if the user would rather use the description as the column name
    if desc_as_label or label:
        if label:
            new_label = label
        else:
            new_label = _GetLabel(tag_name)
        df.rename(inplace=True, columns={tag_name: new_label,
                  tag_name + " Status": new_label + " Status"})
    return df
@numba.jit
def _GetNextHistSmallUTC(pulKey, nRet):
    # This is a base function that iterates over a predefined history call,
    # which may be raw, snap, max, min, etc. Returns (times, values,
    # statuses) as three parallel numpy arrays.
    # NOTE(review): the incoming nRet argument is immediately overwritten
    # below, and numba.jit likely falls back to object mode for these
    # ctypes calls -- confirm whether the decorator helps at all.
    pdValue, ptTime, pusStatus = c_double(-9999), c_long(-9999), c_ushort(0)
    refVal, refTime, refStat = byref(pdValue), byref(ptTime), byref(pusStatus)
    val = np.empty(0)
    time_ = np.empty(0)
    stat = np.empty(0)
    # Once nRet is not zero, the function was terminated, either due to an
    # error or due to the end of the data period.
    nRet = dna_dll.DnaGetNextHistSmallUTC(pulKey, refVal, refTime, refStat)
    while nRet == 0:
        val = np.append(val, pdValue.value)
        time_ = np.append(time_, ptTime.value)
        stat = np.append(stat, pusStatus.value)
        nRet = dna_dll.DnaGetNextHistSmallUTC(pulKey, refVal, refTime, refStat)
    return time_, val, stat
@numba.jit
def _GetNextHSHistUTC(pulKey, nRet):
    # This is a base function that iterates over a predefined history call,
    # which may be raw, snap, max, min, etc. Returns (times, values,
    # statuses); times are epoch milliseconds (seconds * 1000 + millis).
    # NOTE(review): the incoming nRet argument is immediately overwritten,
    # and the string status buffer is filled by the API but its value is
    # discarded -- every sample is recorded with status 3. Confirm intent.
    pdValue, ptTime, pnMillis = c_double(-9999), c_long(-9999), c_ushort(0)
    szStatus, nStatus = create_string_buffer(20), c_ushort(20)
    refVal, refTime, refMillis = byref(pdValue), byref(ptTime), byref(pnMillis)
    refStatus = byref(szStatus)
    val = np.empty(0)
    time_ = np.empty(0)
    stat = np.empty(0)
    # Once nRet is not zero, the function was terminated, either due to an
    # error or due to the end of the data period.
    nRet = dna_dll.DnaGetNextHSHistUTC(pulKey, refVal, refTime, refMillis,
                                       refStatus, nStatus)
    while nRet == 0:
        val = np.append(val, pdValue.value)
        time_ = np.append(time_, ptTime.value * 1000 + pnMillis.value)
        stat = np.append(stat, 3)
        nRet = dna_dll.DnaGetNextHSHistUTC(pulKey, refVal, refTime, refMillis,
                                           refStatus, nStatus)
    return time_, val, stat
def _GetLabel(tag_name):
    # Resolve a human-friendly column label for a tag: prefer the tag's
    # configured description, falling back to the tag name itself when no
    # description is available.
    description = GetTagDescription(tag_name)
    return description if description else tag_name
def GetMultipleTags(tag_list, start_time, end_time, sampling_rate=None,
                    fill_limit=99999, verify_time=False, desc_as_label=False,
                    utc=False):
    """
    Retrieves raw data from eDNA history for multiple tags, merges the
    results into a single DataFrame, and optionally resamples the merged
    data at the specified sampling_rate.

    :param tag_list: a list of fully-qualified (site.service.tag) eDNA tags
    :param start_time: must be in format mm/dd/yy hh:mm:ss
    :param end_time: must be in format mm/dd/yy hh:mm:ss
    :param sampling_rate: in units of seconds
    :param fill_limit: in units of data points
    :param verify_time: verify that the time is not before or after the query
    :param desc_as_label: use the tag description as the column name instead
        of the full tag
    :param utc: if True, use the integer time format instead of DateTime
    :return: a pandas DataFrame with timestamp and values
    """
    frames = []
    used_labels = []
    # Pull each tag with the "raw" retrieval method, which returns the data
    # exactly as it is stored in the historian
    for tag in tag_list:
        tag_df = GetHist(tag, start_time, end_time, utc=utc)
        if tag_df.empty:
            continue
        # eDNA occasionally returns a duplicate index/value pair, which
        # would make the concat below fail; keep only the first occurrence
        tag_df = tag_df[~tag_df.index.duplicated(keep='first')]
        column = tag
        if desc_as_label:
            # Descriptions may collide, so append 2, 3, ... until unique
            base_label = _GetLabel(tag)
            column = base_label
            suffix = 2
            while column in used_labels:
                column = base_label + str(suffix)
                suffix += 1
            used_labels.append(column)
            tag_df.rename(columns={tag: column}, inplace=True)
        # Keep only the value column for the final merge
        frames.append(pd.DataFrame(tag_df[column]))
    if not frames:
        warnings.warn('WARNING- No data retrieved for any tags. ' +
                      'Check eDNA connection, ensure that the start time is ' +
                      'not later than the end time, verify that the ' +
                      'DateTime formatting matches eDNA requirements, and ' +
                      'check that data exists in the query time period.')
        return pd.DataFrame()
    # Outer-join all the frames; verify_integrity is slow, but it ensures
    # that the concatenation worked correctly
    merged_df = pd.concat(frames, axis=1, verify_integrity=True)
    merged_df = merged_df.fillna(method="ffill", limit=fill_limit)
    # eDNA sometimes pulls data slightly outside the requested window, so
    # optionally trim anything not within the original criteria
    if verify_time:
        start_np = pd.to_datetime(start_time)
        end_np = pd.to_datetime(end_time)
        in_window = (merged_df.index > start_np) & (merged_df.index <= end_np)
        merged_df = merged_df.loc[in_window]
    # Finally, resample the data at the rate requested by the user
    if sampling_rate:
        merged_df = merged_df.resample(str(sampling_rate) + "S").fillna(
            method="ffill", limit=fill_limit)
    return merged_df
def _FormatPoints(szPoint, pdValue, szTime, szStatus, szDesc, szUnits):
    # Build a [tag, value, time, status, description, units] row from the
    # raw ctypes buffers filled in by the point-listing API. Implicitly
    # returns None when the tag buffer is blank, which callers use to
    # skip empty entries.
    if not szPoint.value.strip():
        return None
    return [_format_str(szPoint.value.decode(errors='ignore')),
            pdValue.value,
            _format_str(szTime.value.decode(errors='ignore')),
            _format_str(szStatus.value.decode(errors='ignore')),
            _format_str(szDesc.value.decode(errors='ignore')),
            _format_str(szUnits.value.decode(errors='ignore'))]
def GetPoints(edna_service):
    """
    Obtains all the points in the edna_service, including real-time values.

    :param edna_service: The full Site.Service name of the eDNA service.
    :return: A pandas DataFrame of points in the form [Tag, Value, Time,
        Status, Description, Units]
    """
    # Define all required variables in the correct ctypes format.
    # The "2"-suffixed buffers are reused on every DnaGetNextPointEntry call.
    szServiceName = c_char_p(edna_service.encode('utf-8'))
    nStarting, pulKey, pdValue = c_ushort(0), c_ulong(0), c_double(-9999)
    szPoint, szTime = create_string_buffer(30), create_string_buffer(30)
    szStatus, szDesc = create_string_buffer(20), create_string_buffer(90)
    szUnits = create_string_buffer(20)
    szPoint2, szTime2 = create_string_buffer(30), create_string_buffer(30)
    szStatus2, szDesc2 = create_string_buffer(20), create_string_buffer(90)
    szUnits2, pdValue2 = create_string_buffer(20), c_double(-9999)
    nPoint, nTime, nStatus = c_ushort(30), c_ushort(30), c_ushort(20)
    nDesc, nUnits = c_ushort(90), c_ushort(20)
    # Call the eDNA function. nRet is zero if the function is successful.
    points = []
    nRet = dna_dll.DnaGetPointEntry(szServiceName, nStarting, byref(pulKey),
        byref(szPoint), nPoint, byref(pdValue), byref(szTime), nTime,
        byref(szStatus), nStatus, byref(szDesc), nDesc, byref(szUnits), nUnits)
    tag = _FormatPoints(szPoint, pdValue, szTime, szStatus, szDesc, szUnits)
    if tag:
        points.append(tag)
    # Iterate across all the returned services
    while nRet == 0:
        nRet = dna_dll.DnaGetNextPointEntry(pulKey, byref(szPoint2), nPoint,
            byref(pdValue2), byref(szTime2), nTime, byref(szStatus2), nStatus,
            byref(szDesc2), nDesc, byref(szUnits2), nUnits)
        # We want to ensure only UTF-8 characters are returned. Ignoring
        # characters is slightly unsafe, but they should only occur in the
        # units or description, so it's not a huge issue.
        tag = _FormatPoints(szPoint2, pdValue2, szTime2, szStatus2, szDesc2,
                            szUnits2)
        if tag:
            points.append(tag)
    # If no results were returned, raise a warning
    df = pd.DataFrame()
    if points:
        df = pd.DataFrame(points, columns=["Tag", "Value", "Time", "Status",
                                           "Description", "Units"])
    else:
        warnings.warn("WARNING- No points were returned. Check that the " +
                      "service exists and contains points.")
    return df
def GetRTFull(tag_name):
    """
    Gets current information about a point configured in a real-time
    eDNA service, including current value, time, status, description,
    and units.

    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :return: list of [value, time, status, status int, description, units],
        or None if the point does not exist or the API call fails
    """
    # Check if the point even exists
    if not DoesIDExist(tag_name):
        warnings.warn("WARNING- " + tag_name + " does not exist or " +
                      "connection was dropped. Try again if tag does exist.")
        return None
    # Define all required variables in the correct ctypes format.
    # The 20-byte buffers receive the string forms of value, time, status,
    # description, and units from the API.
    szPoint = c_char_p(tag_name.encode('utf-8'))
    pdValue, ptTime = c_double(-9999), c_long(-9999)
    szValue, szTime = create_string_buffer(20), create_string_buffer(20)
    szStatus, szDesc = create_string_buffer(20), create_string_buffer(20)
    szUnits = create_string_buffer(20)
    nValue, nTime, nStatus = c_ushort(20), c_ushort(20), c_ushort(20)
    # NOTE(review): nDesc and nUnits are passed as 0 although their buffers
    # are 20 bytes -- confirm against the EzDNA API whether this truncates
    # the description/units output.
    pusStatus, nDesc, nUnits = c_ushort(0), c_ushort(0), c_ushort(0)
    # Call the eDNA function. nRet is zero if the function is successful
    nRet = dna_dll.DNAGetRTFull(szPoint, byref(pdValue), byref(szValue),
        nValue, byref(ptTime), byref(szTime), nTime, byref(pusStatus),
        byref(szStatus), nStatus, byref(szDesc), nDesc, byref(szUnits), nUnits)
    # Check to make sure the function returned correctly. If not, return None
    if nRet == 0:
        return ([pdValue.value, szTime.value.decode('utf-8'),
                szStatus.value.decode('utf-8'), pusStatus.value,
                szDesc.value.decode('utf-8'), szUnits.value.decode('utf-8')])
    else:
        warnings.warn("WARNING- eDNA API failed with code " + str(nRet))
        return None
def _FormatServices(szSvcName, szSvcDesc, szSvcType, szSvcStat):
    # Returns an array of properly-formatted services from the
    # GetServices function. Implicitly returns None when the decoded
    # service name is blank, which callers use to skip empty entries.
    name = _format_str(szSvcName.value.decode(errors='ignore'))
    desc = _format_str(szSvcDesc.value.decode(errors='ignore'))
    type_ = _format_str(szSvcType.value.decode(errors='ignore'))
    status = _format_str(szSvcStat.value.decode(errors='ignore'))
    if name:
        return [name, desc, type_, status]
def GetServices():
    """
    Obtains all the connected eDNA services.

    :return: A pandas DataFrame of connected eDNA services in the form [Name,
        Description, Type, Status]
    """
    # Define all required variables in the correct ctypes format.
    # Empty szType/szStartSvcName ask the dll for every service from the
    # beginning; pulKey identifies this enumeration across calls.
    pulKey = c_ulong(0)
    szType = c_char_p("".encode('utf-8'))
    szStartSvcName = c_char_p("".encode('utf-8'))
    szSvcName, szSvcDesc = create_string_buffer(30), create_string_buffer(90)
    szSvcType, szSvcStat = create_string_buffer(30), create_string_buffer(30)
    szSvcName2, szSvcDesc2 = create_string_buffer(30), create_string_buffer(90)
    szSvcType2, szSvcStat2 = create_string_buffer(30), create_string_buffer(30)
    nSvcName, nSvcDesc = c_ushort(30), c_ushort(90)
    nSvcType, nSvcStat = c_ushort(30), c_ushort(30)
    # Call the eDNA function. nRet is zero if the function is successful.
    services = []
    nRet = dna_dll.DnaGetServiceEntry(szType, szStartSvcName, byref(pulKey),
        byref(szSvcName), nSvcName, byref(szSvcDesc), nSvcDesc,
        byref(szSvcType), nSvcType, byref(szSvcStat), nSvcStat)
    serv = _FormatServices(szSvcName, szSvcDesc, szSvcType, szSvcStat)
    if serv:
        services.append(serv)
    # Iterate across all the returned services; a second set of buffers is
    # reused for every subsequent entry until nRet goes non-zero.
    while nRet == 0:
        nRet = dna_dll.DnaGetNextServiceEntry(pulKey,
            byref(szSvcName2), nSvcName, byref(szSvcDesc2), nSvcDesc,
            byref(szSvcType2), nSvcType, byref(szSvcStat2), nSvcStat)
        # We want to ensure only UTF-8 characters are returned. Ignoring
        # characters is slightly unsafe, but they should only occur in the
        # units or description, so it's not a huge issue.
        serv = _FormatServices(szSvcName2, szSvcDesc2, szSvcType2, szSvcStat2)
        if serv:
            services.append(serv)
    # If no results were returned, raise a warning
    df = pd.DataFrame()
    if services:
        df = pd.DataFrame(services, columns=["Name", "Description", "Type",
                                             "Status"])
    else:
        warnings.warn("WARNING- No connected eDNA services detected. Check " +
                      "your DNASys.ini file and your network connection.")
    return df
def GetTagDescription(tag_name):
    """
    Looks up the configured description for a real-time eDNA point.

    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :return: the description string; the tag name itself when no
        description is configured (or the tag was not fully qualified);
        None when the tag does not exist or is not found in its service
    """
    # Bail out early if the point is unknown to the connected services
    if not DoesIDExist(tag_name):
        warnings.warn("WARNING- " + tag_name + " does not exist or " +
                      "connection was dropped. Try again if tag does exist.")
        return None
    # A fully-qualified tag has at least Site, Service and Tag pieces
    pieces = tag_name.split(".")
    if len(pieces) < 3:
        warnings.warn("WARNING- Please supply the full Site.Service.Tag.")
        return tag_name
    # Query the point list for this tag's Site.Service
    site_service = ".".join(pieces[:2])
    points = GetPoints(site_service)
    if tag_name not in points.Tag.values:
        warnings.warn("WARNING- " + tag_name + " not found in service.")
        return None
    description = points[points.Tag == tag_name].Description.values[0]
    return description if description else tag_name
def HistAppendValues(site_service, tag_name, times, values, statuses):
    """
    Appends a value to an eDNA history service. Take very careful note of the
    following required parameters. Any deviation from this exact format WILL
    cause the function to fail.

    This function will append values to history, only if they are LATER than
    the current time of the last written data point. If this is not true, no
    data will be appended.

    This value is strongly preferred over HistUpdateInsertValues, which will
    slow down data retrieval if it is used too often.

    :param site_service: This is the history service for the eDNA tag, NOT
                         the site.service of the tag itself. For instance,
                         ANTARES.HISTORY, not ANTARES.ANVCALC
    :param tag_name: This is the full site.service.tag. For instance,
                     ANTARES.ANVCALC.ADE1CA02
    :param times: This is a Python array of times in UTC Epoch format.
                  For example, "1483926416" not "2016/01/01 01:01:01".
                  This must be an array.
    :param values: A Python array of data point values for each times.
    :param statuses: The status of the point. Refer to eDNA documentation
                     for more information. Usually use '3', which is 'OK'.
    """
    # Define all required variables in the correct ctypes format
    szService = c_char_p(site_service.encode('utf-8'))
    szPoint = c_char_p(tag_name.encode('utf-8'))
    # nCount is fixed at 1, so points are written to the dll one at a time
    nCount = c_ushort(1)
    # Iterate over each user-supplied data point
    for dttime, value, status in zip(times, values, statuses):
        # Define all required variables in the correct ctypes format
        PtTimeList = c_long(dttime)
        PusStatusList = c_ushort(status)
        PszValueList = c_char_p(str(value).encode('utf-8'))
        szError = create_string_buffer(20)
        nError = c_ushort(20)
        # Call the history append function.
        # NOTE(review): nRet and szError are not checked, so per-point
        # failures are silently ignored here.
        nRet = dna_dll.DnaHistAppendValues(szService, szPoint,
            nCount, byref(PtTimeList), byref(PusStatusList),
            byref(PszValueList), byref(szError), nError)
def HistUpdateInsertValues(site_service, tag_name, times, values, statuses):
    """
    CAUTION- Use HistAppendValues instead of this function, unless you know
    what you are doing.

    Inserts a value to an eDNA history service. Take very careful note of the
    following required parameters. Any deviation from this exact format WILL
    cause the function to fail.

    :param site_service: This is the history service for the eDNA tag, NOT
                         the site.service of the tag itself. For instance,
                         ANTARES.HISTORY, not ANTARES.ANVCALC
    :param tag_name: This is the full site.service.tag. For instance,
                     ANTARES.ANVCALC.ADE1CA02
    :param times: This is a Python array of times in UTC Epoch format.
                  For example, "1483926416" not "2016/01/01 01:01:01".
                  This must be an array.
    :param values: A Python array of data point values for each times.
    :param statuses: The status of the point. Refer to eDNA documentation
                     for more information. Usually use '3', which is 'OK'.
    """
    # Define all required variables in the correct ctypes format
    szService = c_char_p(site_service.encode('utf-8'))
    szPoint = c_char_p(tag_name.encode('utf-8'))
    # nCount is fixed at 1, so points are written to the dll one at a time
    nCount = c_ushort(1)
    # Iterate over each user-supplied data point
    for dttime, value, status in zip(times, values, statuses):
        # Define all required variables in the correct ctypes format
        PtTimeList = c_long(dttime)
        PusStatusList = c_ushort(status)
        PszValueList = c_char_p(str(value).encode('utf-8'))
        szError = create_string_buffer(20)
        nError = c_ushort(20)
        # Call the history update/insert function.
        # NOTE(review): nRet and szError are not checked, so per-point
        # failures are silently ignored here.
        nRet = dna_dll.DnaHistUpdateInsertValues(szService, szPoint,
            nCount, byref(PtTimeList), byref(PusStatusList),
            byref(PszValueList), byref(szError), nError)
def SelectPoint():
    """
    Opens an eDNA point picker dialog and waits for the user to choose
    a single tag.

    :return: the selected tag name as a string
    """
    # A 20-character buffer receives the user's selection
    point_buffer = create_string_buffer(20)
    buffer_size = c_ushort(20)
    # Blocks until the picker dialog is dismissed
    dna_dll.DnaSelectPoint(byref(point_buffer), buffer_size)
    return point_buffer.value.decode('utf-8')
def StringToUTCTime(time_string):
    """
    Converts a DateTime string into an integer UTC epoch time using the
    eDNA API.

    :param time_string: must be in the format "MM/dd/yy hh:mm:ss"
    :return: an integer representing the UTC time
    """
    return dna_dll.StringToUTCTime(c_char_p(time_string.encode('utf-8')))
# At the end of the module, we need to check that at least one eDNA service
# is connected. Otherwise, there is a problem with the eDNA connection.
service_array = GetServices()
num_services = 0
if not service_array.empty:
    # Report how many services responded (one DataFrame row per service)
    num_services = str(len(service_array))
    print("Successfully connected to " + num_services + " eDNA services.")
# Cleanup the unnecessary variables so they don't leak into the module API
del(service_array, num_services, default_location)
|
drericstrong/pyedna | pyedna/ezdna.py | SelectPoint | python | def SelectPoint():
"""
Opens an eDNA point picker, where the user can select a single tag.
:return: selected tag name
"""
# Define all required variables in the correct ctypes format
pszPoint = create_string_buffer(20)
nPoint = c_ushort(20)
# Opens the point picker
dna_dll.DnaSelectPoint(byref(pszPoint), nPoint)
tag_result = pszPoint.value.decode('utf-8')
return tag_result | Opens an eDNA point picker, where the user can select a single tag.
:return: selected tag name | train | https://github.com/drericstrong/pyedna/blob/b8f8f52def4f26bb4f3a993ce3400769518385f6/pyedna/ezdna.py#L729-L741 | null | # -*- coding: utf-8 -*-
"""
pyedna.ezdna
--------------
This module contains "easy" versions of common functions from the eDNA
C++ dll. Obtain a legal copy of the C++ eDNA dll for use.
:copyright: (c) 2017 Eric Strong.
:license: Refer to LICENSE.txt for more information.
"""
# Note- all functions are in CamelCase to match the original eDNA function
# names, even though this does not follow PEP 8.
import re
import os
import numba
import warnings
import numpy as np
import pandas as pd
from unittest.mock import Mock
from ctypes import cdll, byref, create_string_buffer
from ctypes import c_char_p, c_double, c_ushort, c_long, c_ulong
def _mock_edna():
# This function will mock all the methods that were used in the dna_dll.
# It's necessary so that documentation can be automatically created.
dna_dll = Mock()
attrs = {'DnaGetHistAvgUTC.return_value': c_ulong(1),
'DnaGetHistInterpUTC.return_value': c_ulong(1),
'DnaGetHistMinUTC.return_value': c_ulong(1),
'DnaGetHistMaxUTC.return_value': c_ulong(1),
'DnaGetHistSnapUTC.return_value': c_ulong(1),
'DnaGetHistRawUTC.return_value': c_ulong(1),
'DoesIdExist.return_value': c_ulong(1),
'DnaGetHSHistRawUTC.return_value': c_ulong(1),
'DnaGetNextHSHistUTC.return_value': c_ulong(1),
'DnaGetPointEntry.return_value': c_ulong(1),
'DnaGetNextPointEntry.return_value': c_ulong(1),
'DNAGetRTFull.return_value': c_ulong(1),
'DnaSelectPoint.return_value': c_ulong(1),
'StringToUTCTime.return_value': 1,
'DnaGetServiceEntry.return_value': c_ulong(1),
'DnaGetNextServiceEntry.return_value': c_ulong(1),
'DnaHistAppendValues.return_value': c_ulong(1),
'DnaHistUpdateInsertValues.return_value': c_ulong(1),
'DnaCancelHistRequest.return_value': None,
'DnaGetNextHistSmallUTC.return_value': c_ulong(1)}
dna_dll.configure_mock(**attrs)
return dna_dll
# This code should execute at the beginning of the module import, because
# all of the functions in this module require the dna_dll library to be
# loaded. See "LoadDll" if not in default location
default_location = "C:\\Program Files (x86)\\eDNA\\EzDnaApi64.dll"
if os.path.isfile(default_location):
    dna_dll = cdll.LoadLibrary(default_location)
else:
    # Fall back to a Mock so the module still imports (e.g. for building
    # documentation); every API call fails gracefully until LoadDll is used.
    warnings.warn("ERROR- no eDNA dll detected at " +
                  "C:\\Program Files (x86)\\eDNA\\EzDnaApi64.dll" +
                  " . Please manually load dll using the LoadDll function. " +
                  "Mocking dll, but all functions will fail until " +
                  "dll is manually loaded...")
    dna_dll = _mock_edna()
# If the EzDnaApi file is not in the default location, the user must explicitly
# load it using the LoadDll function.
def LoadDll(location):
    """
    If the EzDnaApi64.dll file is not in the default location
    (C:\\Program Files (x86)\\eDNA\\EzDnaApi64.dll) then the user must specify
    the correct location of the file, before this module can be used.

    :param location: the full location of EzDnaApi64.dll, including filename
    :raises FileNotFoundError: if no file exists at the supplied location
    """
    # Guard clause first; FileNotFoundError subclasses Exception, so any
    # caller that caught the old generic Exception still works.
    if not os.path.isfile(location):
        raise FileNotFoundError("ERROR- file does not exist at " + location)
    global dna_dll
    dna_dll = cdll.LoadLibrary(location)
def _format_str(text):
# Only allows a-z, 0-9, ., _, :, /, -, and spaces
if type(text) is str:
formatted_text = re.sub('[^-._:/\sA-Za-z0-9]+', '', text).strip()
return formatted_text
else:
return text
def DoesIDExist(tag_name):
    """
    Checks whether a fully-qualified site.service.tag eDNA point is known
    to any of the connected services.

    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :return: True if the point exists, False otherwise

    Example:

    >>> DoesIDExist("Site.Service.Tag")
    """
    # The eDNA API expects a C string, so encode the tag to bytes first
    encoded_tag = c_char_p(tag_name.encode('utf-8'))
    return bool(dna_dll.DoesIdExist(encoded_tag))
def GetHistAvg(tag_name, start_time, end_time, period,
               desc_as_label=False, label=None):
    """
    Pulls eDNA history for a tag with values averaged over each interval
    of "period" seconds. Thin convenience wrapper around GetHist.

    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :param start_time: must be in format mm/dd/yy hh:mm:ss
    :param end_time: must be in format mm/dd/yy hh:mm:ss
    :param period: in units of seconds (e.g. 10)
    :param desc_as_label: use the tag description as the column name
        instead of the full tag
    :param label: supply a custom label to use as the DataFrame column name
    :return: a pandas DataFrame with timestamp, value, and status
    """
    return GetHist(tag_name, start_time, end_time, period=period,
                   mode="avg", desc_as_label=desc_as_label, label=label)
def GetHistInterp(tag_name, start_time, end_time, period,
                  desc_as_label=False, label=None):
    """
    Pulls eDNA history for a tag, linearly interpolated over each interval
    of "period" seconds. Thin convenience wrapper around GetHist.

    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :param start_time: must be in format mm/dd/yy hh:mm:ss
    :param end_time: must be in format mm/dd/yy hh:mm:ss
    :param period: in units of seconds (e.g. 10)
    :param desc_as_label: use the tag description as the column name
        instead of the full tag
    :param label: supply a custom label to use as the DataFrame column name
    :return: a pandas DataFrame with timestamp, value, and status
    """
    return GetHist(tag_name, start_time, end_time, period=period,
                   mode="interp", desc_as_label=desc_as_label, label=label)
def GetHistMax(tag_name, start_time, end_time, period,
               desc_as_label=False, label=None):
    """
    Pulls eDNA history for a tag, keeping the maximum value over each
    interval of "period" seconds. Thin convenience wrapper around GetHist.

    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :param start_time: must be in format mm/dd/yy hh:mm:ss
    :param end_time: must be in format mm/dd/yy hh:mm:ss
    :param period: in units of seconds (e.g. 10)
    :param desc_as_label: use the tag description as the column name
        instead of the full tag
    :param label: supply a custom label to use as the DataFrame column name
    :return: a pandas DataFrame with timestamp, value, and status
    """
    return GetHist(tag_name, start_time, end_time, period=period,
                   mode="max", desc_as_label=desc_as_label, label=label)
def GetHistMin(tag_name, start_time, end_time, period,
               desc_as_label=False, label=None):
    """
    Pulls eDNA history for a tag, keeping the minimum value over each
    interval of "period" seconds. Thin convenience wrapper around GetHist.

    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :param start_time: must be in format mm/dd/yy hh:mm:ss
    :param end_time: must be in format mm/dd/yy hh:mm:ss
    :param period: in units of seconds (e.g. 10)
    :param desc_as_label: use the tag description as the column name
        instead of the full tag
    :param label: supply a custom label to use as the DataFrame column name
    :return: a pandas DataFrame with timestamp, value, and status
    """
    return GetHist(tag_name, start_time, end_time, period=period,
                   mode="min", desc_as_label=desc_as_label, label=label)
def GetHistRaw(tag_name, start_time, end_time, high_speed=False,
               desc_as_label=False, label=None):
    """
    Retrieves raw data from eDNA history for a given tag.

    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :param start_time: must be in format mm/dd/yy hh:mm:ss
    :param end_time: must be in format mm/dd/yy hh:mm:ss
    :param high_speed: true = pull milliseconds
    :param desc_as_label: use the tag description as the column name instead
        of the full tag
    :param label: supply a custom label to use as the DataFrame column name
    :return: a pandas DataFrame with timestamp, value, and status
    """
    # BUG FIX: high_speed was previously accepted but never forwarded, so
    # millisecond pulls silently fell back to one-second resolution.
    return GetHist(tag_name, start_time, end_time, mode="raw",
                   high_speed=high_speed, desc_as_label=desc_as_label,
                   label=label)
def GetHistSnap(tag_name, start_time, end_time, period,
                desc_as_label=False, label=None):
    """
    Pulls eDNA history for a tag, snapped to the last known value over
    each interval of "period" seconds. Thin convenience wrapper around
    GetHist.

    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :param start_time: must be in format mm/dd/yy hh:mm:ss
    :param end_time: must be in format mm/dd/yy hh:mm:ss
    :param period: in units of seconds (e.g. 10)
    :param desc_as_label: use the tag description as the column name
        instead of the full tag
    :param label: supply a custom label to use as the DataFrame column name
    :return: a pandas DataFrame with timestamp, value, and status
    """
    return GetHist(tag_name, start_time, end_time, period=period,
                   mode="snap", desc_as_label=desc_as_label, label=label)
def GetHist(tag_name, start_time, end_time, period=5, mode="raw",
            desc_as_label=False, label=None, high_speed=False, utc=False):
    """
    Retrieves data from eDNA history for a given tag.

    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :param start_time: must be in format mm/dd/yy hh:mm:ss
    :param end_time: must be in format mm/dd/yy hh:mm:ss
    :param period: specify the number of seconds for the pull interval
    :param mode: "raw", "snap", "avg", "interp", "max", "min"
        See eDNA documentation for more information.
    :param desc_as_label: use the tag description as the column name instead
        of the full tag
    :param label: supply a custom label to use as the DataFrame column name
    :param high_speed: if True, pull millisecond data
    :param utc: if True, use the integer time format instead of DateTime
    :return: a pandas DataFrame with timestamp, value, and status
    """
    # Check if the point even exists
    if not DoesIDExist(tag_name):
        warnings.warn("WARNING- " + tag_name + " does not exist or " +
                      "connection was dropped. Try again if tag does exist.")
        return pd.DataFrame()
    # Define all required variables in the correct ctypes format
    szPoint = c_char_p(tag_name.encode('utf-8'))
    tStart = c_long(StringToUTCTime(start_time))
    tEnd = c_long(StringToUTCTime(end_time))
    tPeriod = c_long(period)
    pulKey = c_ulong(0)
    # Initialize the data pull using the specified pulKey, which is an
    # identifier that tells eDNA which data pull is occurring
    mode = mode.lower().strip()
    if not high_speed:
        # BUG FIX: these branches were previously independent "if"
        # statements, with the final "else" bound only to the "snap" test.
        # Any non-snap mode therefore ALSO started a second raw-mode pull,
        # clobbering nRet and the request key. "elif" makes the modes
        # mutually exclusive; unrecognized modes still fall back to raw.
        if mode == "avg":
            nRet = dna_dll.DnaGetHistAvgUTC(szPoint, tStart, tEnd, tPeriod, byref(pulKey))
        elif mode == "interp":
            nRet = dna_dll.DnaGetHistInterpUTC(szPoint, tStart, tEnd, tPeriod, byref(pulKey))
        elif mode == "min":
            nRet = dna_dll.DnaGetHistMinUTC(szPoint, tStart, tEnd, tPeriod, byref(pulKey))
        elif mode == "max":
            nRet = dna_dll.DnaGetHistMaxUTC(szPoint, tStart, tEnd, tPeriod, byref(pulKey))
        elif mode == "snap":
            nRet = dna_dll.DnaGetHistSnapUTC(szPoint, tStart, tEnd, tPeriod, byref(pulKey))
        else:
            nRet = dna_dll.DnaGetHistRawUTC(szPoint, tStart, tEnd, byref(pulKey))
        time_, val, stat = _GetNextHistSmallUTC(pulKey, nRet)
    else:
        nStartMillis = c_ushort(0)
        nEndMillis = c_ushort(0)
        nRet = dna_dll.DnaGetHSHistRawUTC(szPoint, tStart, nStartMillis,
                                          tEnd, nEndMillis, byref(pulKey))
        time_, val, stat = _GetNextHSHistUTC(pulKey, nRet)
    # The history request must be cancelled to free up network resources
    dna_dll.DnaCancelHistRequest(pulKey)
    # To construct the pandas DataFrame, the tag name will be used as the
    # column name, and the eDNA epoch index is converted to an actual
    # DateTime unless the caller asked for the raw UTC integers
    d = {tag_name + ' Status': stat, tag_name: val}
    df = pd.DataFrame(data=d, index=time_)
    if not utc:
        # Regular pulls are indexed in seconds, high-speed in milliseconds
        if not high_speed:
            df.index = pd.to_datetime(df.index, unit="s")
        else:
            df.index = pd.to_datetime(df.index, unit="ms")
    if df.empty:
        warnings.warn('WARNING- No data retrieved for ' + tag_name + '. ' +
                      'Check eDNA connection, ensure that the start time is ' +
                      'not later than the end time, verify that the ' +
                      'DateTime formatting matches eDNA requirements, and ' +
                      'check that data exists in the query time period.')
    # Check if the user would rather use the description as the column name
    if desc_as_label or label:
        if label:
            new_label = label
        else:
            new_label = _GetLabel(tag_name)
        df.rename(inplace=True, columns={tag_name: new_label,
                  tag_name + " Status": new_label + " Status"})
    return df
@numba.jit
def _GetNextHistSmallUTC(pulKey, nRet):
    """
    Drains an initialized eDNA history request one point at a time.

    :param pulKey: ctypes key identifying the active history request
    :param nRet: return code from the initializing call (unused here;
        kept for interface compatibility with existing callers)
    :return: tuple of (times, values, statuses) as float numpy arrays
    """
    pdValue, ptTime, pusStatus = c_double(-9999), c_long(-9999), c_ushort(0)
    refVal, refTime, refStat = byref(pdValue), byref(ptTime), byref(pusStatus)
    # Accumulate in Python lists- list.append is O(1), whereas the old
    # np.append reallocated the whole array on every point (O(n^2) total)
    times, vals, stats = [], [], []
    # Once nRet is not zero, the pull was terminated, either due to an
    # error or due to the end of the data period.
    nRet = dna_dll.DnaGetNextHistSmallUTC(pulKey, refVal, refTime, refStat)
    while nRet == 0:
        vals.append(pdValue.value)
        times.append(ptTime.value)
        stats.append(pusStatus.value)
        nRet = dna_dll.DnaGetNextHistSmallUTC(pulKey, refVal, refTime, refStat)
    # dtype=float matches the arrays the old np.append version produced
    return (np.array(times, dtype=float), np.array(vals, dtype=float),
            np.array(stats, dtype=float))
@numba.jit
def _GetNextHSHistUTC(pulKey, nRet):
    """
    Drains an initialized high-speed (millisecond) eDNA history request
    one point at a time.

    :param pulKey: ctypes key identifying the active history request
    :param nRet: return code from the initializing call (unused here;
        kept for interface compatibility with existing callers)
    :return: tuple of (times in ms, values, statuses) as float numpy arrays
    """
    pdValue, ptTime, pnMillis = c_double(-9999), c_long(-9999), c_ushort(0)
    szStatus, nStatus = create_string_buffer(20), c_ushort(20)
    refVal, refTime, refMillis = byref(pdValue), byref(ptTime), byref(pnMillis)
    refStatus = byref(szStatus)
    # Accumulate in Python lists- list.append is O(1), whereas the old
    # np.append reallocated the whole array on every point (O(n^2) total)
    times, vals = [], []
    # Once nRet is not zero, the pull was terminated, either due to an
    # error or due to the end of the data period.
    nRet = dna_dll.DnaGetNextHSHistUTC(pulKey, refVal, refTime, refMillis,
                                       refStatus, nStatus)
    while nRet == 0:
        vals.append(pdValue.value)
        # Index is in milliseconds: whole seconds * 1000 plus the ms part
        times.append(ptTime.value * 1000 + pnMillis.value)
        nRet = dna_dll.DnaGetNextHSHistUTC(pulKey, refVal, refTime, refMillis,
                                           refStatus, nStatus)
    # NOTE(review): szStatus is fetched but ignored; every point's status
    # is hard-coded to 3 ("OK"), matching the original behavior.
    return (np.array(times, dtype=float), np.array(vals, dtype=float),
            np.full(len(vals), 3, dtype=float))
def _GetLabel(tag_name):
    """
    Resolves the DataFrame column label for a tag: the tag's configured
    description when one exists, otherwise the tag name itself.

    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :return: the description if truthy, else the tag name
    """
    # "or" falls through to tag_name for any falsy description (None, "")
    return GetTagDescription(tag_name) or tag_name
def GetMultipleTags(tag_list, start_time, end_time, sampling_rate=None,
                    fill_limit=99999, verify_time=False, desc_as_label=False,
                    utc=False):
    """
    Retrieves raw data from eDNA history for multiple tags, merging them into
    a single DataFrame, and resampling the data according to the specified
    sampling_rate.

    :param tag_list: a list of fully-qualified (site.service.tag) eDNA tags
    :param start_time: must be in format mm/dd/yy hh:mm:ss
    :param end_time: must be in format mm/dd/yy hh:mm:ss
    :param sampling_rate: in units of seconds
    :param fill_limit: in units of data points
    :param verify_time: verify that the time is not before or after the query
    :param desc_as_label: use the tag description as the column name instead
        of the full tag
    :param utc: if True, use the integer time format instead of DateTime
    :return: a pandas DataFrame with timestamp and values
    """
    # Since we are pulling data from multiple tags, let's iterate over each
    # one. For this case, we only want to pull data using the "raw" method,
    # which will obtain all data as it is actually stored in the historian.
    dfs = []
    columns_names = []
    for tag in tag_list:
        df = GetHist(tag, start_time, end_time, utc=utc)
        if not df.empty:
            # Sometimes a duplicate index/value pair is retrieved from
            # eDNA, which will cause the concat to fail if not removed
            # df.drop_duplicates(inplace=True)
            df = df[~df.index.duplicated(keep='first')]
            # If the user wants to use descriptions as labels, we need to
            # ensure that only unique labels are used
            label = tag
            if desc_as_label:
                orig_label = _GetLabel(tag)
                label = orig_label
                rename_number = 2
                # Append 2, 3, ... until the description label is unique
                while label in columns_names:
                    label = orig_label + str(rename_number)
                    rename_number += 1
            columns_names.append(label)
            df.rename(columns={tag: label}, inplace=True)
            # Add the DataFrame to the list, to be concatenated later.
            # Only the value column is kept- status columns are dropped.
            dfs.append(pd.DataFrame(df[label]))
    # Next, we concatenate all the DataFrames using an outer join (default).
    # Verify integrity is slow, but it ensures that the concatenation
    # worked correctly.
    if dfs:
        merged_df = pd.concat(dfs, axis=1, verify_integrity=True)
        merged_df = merged_df.fillna(method="ffill", limit=fill_limit)
    else:
        warnings.warn('WARNING- No data retrieved for any tags. ' +
                      'Check eDNA connection, ensure that the start time is ' +
                      'not later than the end time, verify that the ' +
                      'DateTime formatting matches eDNA requirements, and ' +
                      'check that data exists in the query time period.')
        return pd.DataFrame()
    # eDNA sometimes pulls data too early or too far- let's filter out all
    # the data that is not within our original criteria.
    if verify_time:
        start_np = pd.to_datetime(start_time)
        end_np = pd.to_datetime(end_time)
        mask = (merged_df.index > start_np) & (merged_df.index <= end_np)
        merged_df = merged_df.loc[mask]
    # Finally, we resample the data at the rate requested by the user.
    if sampling_rate:
        sampling_string = str(sampling_rate) + "S"
        merged_df = merged_df.resample(sampling_string).fillna(
            method="ffill", limit=fill_limit)
    return merged_df
def _FormatPoints(szPoint, pdValue, szTime, szStatus, szDesc, szUnits):
    """
    Converts one raw point entry from GetPoints into a cleaned-up row.

    :return: [tag, value, time, status, description, units] or None when
        the tag buffer is blank (no entry / end of iteration)
    """
    # A blank tag buffer means the dll returned no point entry
    if not szPoint.value.strip():
        return None
    cleaned = [_format_str(buf.value.decode(errors='ignore'))
               for buf in (szPoint, szTime, szStatus, szDesc, szUnits)]
    tag, time_, status, desc, units = cleaned
    return [tag, pdValue.value, time_, status, desc, units]
def GetPoints(edna_service):
    """
    Obtains all the points in the edna_service, including real-time values.

    :param edna_service: The full Site.Service name of the eDNA service.
    :return: A pandas DataFrame of points in the form [Tag, Value, Time,
        Status, Description, Units]
    """
    # Define all required variables in the correct ctypes format.
    # pulKey identifies this enumeration across the Get/GetNext calls;
    # the n* values are the sizes of the corresponding sz* buffers.
    szServiceName = c_char_p(edna_service.encode('utf-8'))
    nStarting, pulKey, pdValue = c_ushort(0), c_ulong(0), c_double(-9999)
    szPoint, szTime = create_string_buffer(30), create_string_buffer(30)
    szStatus, szDesc = create_string_buffer(20), create_string_buffer(90)
    szUnits = create_string_buffer(20)
    szPoint2, szTime2 = create_string_buffer(30), create_string_buffer(30)
    szStatus2, szDesc2 = create_string_buffer(20), create_string_buffer(90)
    szUnits2, pdValue2 = create_string_buffer(20), c_double(-9999)
    nPoint, nTime, nStatus = c_ushort(30), c_ushort(30), c_ushort(20)
    nDesc, nUnits = c_ushort(90), c_ushort(20)
    # Call the eDNA function. nRet is zero if the function is successful.
    points = []
    nRet = dna_dll.DnaGetPointEntry(szServiceName, nStarting, byref(pulKey),
        byref(szPoint), nPoint, byref(pdValue), byref(szTime), nTime,
        byref(szStatus), nStatus, byref(szDesc), nDesc, byref(szUnits), nUnits)
    tag = _FormatPoints(szPoint, pdValue, szTime, szStatus, szDesc, szUnits)
    if tag:
        points.append(tag)
    # Iterate across all the returned points; a second set of buffers is
    # reused for every subsequent entry until nRet goes non-zero.
    while nRet == 0:
        nRet = dna_dll.DnaGetNextPointEntry(pulKey, byref(szPoint2), nPoint,
            byref(pdValue2), byref(szTime2), nTime, byref(szStatus2), nStatus,
            byref(szDesc2), nDesc, byref(szUnits2), nUnits)
        # We want to ensure only UTF-8 characters are returned. Ignoring
        # characters is slightly unsafe, but they should only occur in the
        # units or description, so it's not a huge issue.
        tag = _FormatPoints(szPoint2, pdValue2, szTime2, szStatus2, szDesc2,
                            szUnits2)
        if tag:
            points.append(tag)
    # If no results were returned, raise a warning
    df = pd.DataFrame()
    if points:
        df = pd.DataFrame(points, columns=["Tag", "Value", "Time", "Status",
                                           "Description", "Units"])
    else:
        warnings.warn("WARNING- No points were returned. Check that the " +
                      "service exists and contains points.")
    return df
def GetRTFull(tag_name):
    """
    Gets current information about a point configured in a real-time
    eDNA service, including current value, time, status, description,
    and units.

    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :return: list of [value, time, status, status integer, description,
        units], or None if the tag does not exist or the API call failed
    """
    # Check if the point even exists
    if not DoesIDExist(tag_name):
        warnings.warn("WARNING- " + tag_name + " does not exist or " +
                      "connection was dropped. Try again if tag does exist.")
        return None
    # Define all required variables in the correct ctypes format.
    # The sz* buffers receive strings from the dll; the n* values tell
    # the dll how large each buffer is (20 bytes each here).
    szPoint = c_char_p(tag_name.encode('utf-8'))
    pdValue, ptTime = c_double(-9999), c_long(-9999)
    szValue, szTime = create_string_buffer(20), create_string_buffer(20)
    szStatus, szDesc = create_string_buffer(20), create_string_buffer(20)
    szUnits = create_string_buffer(20)
    nValue, nTime, nStatus = c_ushort(20), c_ushort(20), c_ushort(20)
    pusStatus, nDesc, nUnits = c_ushort(0), c_ushort(0), c_ushort(0)
    # Call the eDNA function. nRet is zero if the function is successful
    nRet = dna_dll.DNAGetRTFull(szPoint, byref(pdValue), byref(szValue),
        nValue, byref(ptTime), byref(szTime), nTime, byref(pusStatus),
        byref(szStatus), nStatus, byref(szDesc), nDesc, byref(szUnits), nUnits)
    # Check to make sure the function returned correctly. If not, return None
    if nRet == 0:
        return ([pdValue.value, szTime.value.decode('utf-8'),
                 szStatus.value.decode('utf-8'), pusStatus.value,
                 szDesc.value.decode('utf-8'), szUnits.value.decode('utf-8')])
    else:
        warnings.warn("WARNING- eDNA API failed with code " + str(nRet))
        return None
def _FormatServices(szSvcName, szSvcDesc, szSvcType, szSvcStat):
    """
    Converts one raw service entry from GetServices into a cleaned row.

    :return: [name, description, type, status] with unsafe characters
        stripped, or None when the name buffer is empty (no entry)
    """
    fields = [_format_str(buf.value.decode(errors='ignore'))
              for buf in (szSvcName, szSvcDesc, szSvcType, szSvcStat)]
    if fields[0]:
        return fields
def GetServices():
    """
    Obtains all the connected eDNA services.

    :return: A pandas DataFrame of connected eDNA services in the form [Name,
        Description, Type, Status]
    """
    # Define all required variables in the correct ctypes format.
    # Empty szType/szStartSvcName ask the dll for every service from the
    # beginning; pulKey identifies this enumeration across calls.
    pulKey = c_ulong(0)
    szType = c_char_p("".encode('utf-8'))
    szStartSvcName = c_char_p("".encode('utf-8'))
    szSvcName, szSvcDesc = create_string_buffer(30), create_string_buffer(90)
    szSvcType, szSvcStat = create_string_buffer(30), create_string_buffer(30)
    szSvcName2, szSvcDesc2 = create_string_buffer(30), create_string_buffer(90)
    szSvcType2, szSvcStat2 = create_string_buffer(30), create_string_buffer(30)
    nSvcName, nSvcDesc = c_ushort(30), c_ushort(90)
    nSvcType, nSvcStat = c_ushort(30), c_ushort(30)
    # Call the eDNA function. nRet is zero if the function is successful.
    services = []
    nRet = dna_dll.DnaGetServiceEntry(szType, szStartSvcName, byref(pulKey),
        byref(szSvcName), nSvcName, byref(szSvcDesc), nSvcDesc,
        byref(szSvcType), nSvcType, byref(szSvcStat), nSvcStat)
    serv = _FormatServices(szSvcName, szSvcDesc, szSvcType, szSvcStat)
    if serv:
        services.append(serv)
    # Iterate across all the returned services; a second set of buffers is
    # reused for every subsequent entry until nRet goes non-zero.
    while nRet == 0:
        nRet = dna_dll.DnaGetNextServiceEntry(pulKey,
            byref(szSvcName2), nSvcName, byref(szSvcDesc2), nSvcDesc,
            byref(szSvcType2), nSvcType, byref(szSvcStat2), nSvcStat)
        # We want to ensure only UTF-8 characters are returned. Ignoring
        # characters is slightly unsafe, but they should only occur in the
        # units or description, so it's not a huge issue.
        serv = _FormatServices(szSvcName2, szSvcDesc2, szSvcType2, szSvcStat2)
        if serv:
            services.append(serv)
    # If no results were returned, raise a warning
    df = pd.DataFrame()
    if services:
        df = pd.DataFrame(services, columns=["Name", "Description", "Type",
                                             "Status"])
    else:
        warnings.warn("WARNING- No connected eDNA services detected. Check " +
                      "your DNASys.ini file and your network connection.")
    return df
def GetTagDescription(tag_name):
    """
    Looks up the configured description of a real-time eDNA point.
    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :return: the tag description, the tag name itself when no description
        is configured, or None when the tag cannot be found
    """
    # Bail out early if the tag is not reachable on any connected service
    if not DoesIDExist(tag_name):
        warnings.warn("WARNING- " + tag_name + " does not exist or " +
                      "connection was dropped. Try again if tag does exist.")
        return None
    # The service-level point query needs the Site.Service prefix, so the
    # caller must have supplied all three dot-separated components
    parts = tag_name.split(".")
    if len(parts) < 3:
        warnings.warn("WARNING- Please supply the full Site.Service.Tag.")
        return tag_name
    # Pull the point table for the tag's Site.Service and search it
    point_table = GetPoints(".".join(parts[:2]))
    if tag_name not in point_table.Tag.values:
        warnings.warn("WARNING- " + tag_name + " not found in service.")
        return None
    # An empty description falls back to the tag name itself
    description = point_table.loc[point_table.Tag == tag_name,
                                  "Description"].values[0]
    return description if description else tag_name
def HistAppendValues(site_service, tag_name, times, values, statuses):
    """
    Appends a value to an eDNA history service. Take very careful note of the
    following required parameters. Any deviation from this exact format WILL
    cause the function to fail.
    This function will append values to history, only if they are LATER than
    the current time of the last written data point. If this is not true, no
    data will be appended.
    This value is strongly preferred over HistUpdateInsertValues, which will
    slow down data retrieval if it is used too often.
    :param site_service: This is the history service for the eDNA tag, NOT
                         the site.service of the tag itself. For instance,
                         ANTARES.HISTORY, not ANTARES.ANVCALC
    :param tag_name: This is the full site.service.tag. For instance,
                     ANTARES.ANVCALC.ADE1CA02
    :param times: This is a Python array of times in UTC Epoch format.
                  For example, "1483926416" not "2016/01/01 01:01:01".
                  This must be an array.
    :param values: A Python array of data point values for each times.
    :param statuses: The status of the point. Refer to eDNA documentation
                     for more information. Usually use '3', which is 'OK'.
    """
    # Define all required variables in the correct ctypes format
    szService = c_char_p(site_service.encode('utf-8'))
    szPoint = c_char_p(tag_name.encode('utf-8'))
    # Each DLL call writes exactly one point, so the count is always 1
    nCount = c_ushort(1)
    # Iterate over each user-supplied data point
    for dttime, value, status in zip(times, values, statuses):
        # Define all required variables in the correct ctypes format
        PtTimeList = c_long(dttime)
        PusStatusList = c_ushort(status)
        PszValueList = c_char_p(str(value).encode('utf-8'))
        szError = create_string_buffer(20)
        nError = c_ushort(20)
        # Call the history append function. nRet is zero on success.
        nRet = dna_dll.DnaHistAppendValues(szService, szPoint,
            nCount, byref(PtTimeList), byref(PusStatusList),
            byref(PszValueList), byref(szError), nError)
        # Previously failures were silently discarded; surface them so the
        # caller knows which points were not written to history
        if nRet:
            warnings.warn("WARNING- eDNA append failed for time " +
                          str(dttime) + " with error '" +
                          szError.value.decode(errors='ignore') + "'")
def HistUpdateInsertValues(site_service, tag_name, times, values, statuses):
    """
    CAUTION- Use HistAppendValues instead of this function, unless you know
    what you are doing.
    Inserts a value to an eDNA history service. Take very careful note of the
    following required parameters. Any deviation from this exact format WILL
    cause the function to fail.
    :param site_service: This is the history service for the eDNA tag, NOT
                         the site.service of the tag itself. For instance,
                         ANTARES.HISTORY, not ANTARES.ANVCALC
    :param tag_name: This is the full site.service.tag. For instance,
                     ANTARES.ANVCALC.ADE1CA02
    :param times: This is a Python array of times in UTC Epoch format.
                  For example, "1483926416" not "2016/01/01 01:01:01".
                  This must be an array.
    :param values: A Python array of data point values for each times.
    :param statuses: The status of the point. Refer to eDNA documentation
                     for more information. Usually use '3', which is 'OK'.
    """
    # Define all required variables in the correct ctypes format
    szService = c_char_p(site_service.encode('utf-8'))
    szPoint = c_char_p(tag_name.encode('utf-8'))
    # Each DLL call writes exactly one point, so the count is always 1
    nCount = c_ushort(1)
    # Iterate over each user-supplied data point
    for dttime, value, status in zip(times, values, statuses):
        # Define all required variables in the correct ctypes format
        PtTimeList = c_long(dttime)
        PusStatusList = c_ushort(status)
        PszValueList = c_char_p(str(value).encode('utf-8'))
        szError = create_string_buffer(20)
        nError = c_ushort(20)
        # Call the history update/insert function. nRet is zero on success.
        nRet = dna_dll.DnaHistUpdateInsertValues(szService, szPoint,
            nCount, byref(PtTimeList), byref(PusStatusList),
            byref(PszValueList), byref(szError), nError)
        # Previously failures were silently discarded; surface them so the
        # caller knows which points were not written to history
        if nRet:
            warnings.warn("WARNING- eDNA update/insert failed for time " +
                          str(dttime) + " with error '" +
                          szError.value.decode(errors='ignore') + "'")
def SelectPoint():
    """
    Opens an eDNA point picker, where the user can select a single tag.
    :return: selected tag name
    """
    # A fully-qualified Site.Service.Tag can easily exceed 20 characters
    # (the docs' own example, ANTARES.ANVCALC.ADE1CA02, is 24), and the
    # original 20-byte buffer truncated such selections. Use a larger
    # buffer; the DLL is told the capacity so it cannot overrun it.
    pszPoint = create_string_buffer(80)
    nPoint = c_ushort(80)
    # Opens the point picker dialog; the selection is written into pszPoint
    dna_dll.DnaSelectPoint(byref(pszPoint), nPoint)
    tag_result = pszPoint.value.decode('utf-8')
    return tag_result
def StringToUTCTime(time_string):
    """
    Converts a DateTime string into the eDNA UTC integer format.
    :param time_string: Must be the format "MM/dd/yy hh:mm:ss"
    :return: an integer representing the UTC int format
    """
    # The parse itself is delegated to the eDNA API
    encoded_time = c_char_p(time_string.encode('utf-8'))
    return dna_dll.StringToUTCTime(encoded_time)
# At the end of the module, check that at least one eDNA service is
# connected; otherwise there is a problem with the eDNA connection.
# GetServices itself warns when nothing is reachable.
service_array = GetServices()
num_services = 0
if len(service_array.index) > 0:
    num_services = str(len(service_array))
    print("Successfully connected to " + num_services + " eDNA services.")
# Drop the module-level temporaries so they do not leak into the public API
del service_array, num_services, default_location
|
drericstrong/pyedna | pyedna/ezdna.py | StringToUTCTime | python | def StringToUTCTime(time_string):
"""
Turns a DateTime string into UTC time.
:param time_string: Must be the format "MM/dd/yy hh:mm:ss"
:return: an integer representing the UTC int format
"""
szTime = c_char_p(time_string.encode('utf-8'))
res = dna_dll.StringToUTCTime(szTime)
return res | Turns a DateTime string into UTC time.
:param time_string: Must be the format "MM/dd/yy hh:mm:ss"
:return: an integer representing the UTC int format | train | https://github.com/drericstrong/pyedna/blob/b8f8f52def4f26bb4f3a993ce3400769518385f6/pyedna/ezdna.py#L744-L753 | null | # -*- coding: utf-8 -*-
"""
pyedna.ezdna
--------------
This module contains "easy" versions of common functions from the eDNA
C++ dll. Obtain a legal copy of the C++ eDNA dll for use.
:copyright: (c) 2017 Eric Strong.
:license: Refer to LICENSE.txt for more information.
"""
# Note- all functions are in CamelCase to match the original eDNA function
# names, even though this does not follow PEP 8.
import re
import os
import numba
import warnings
import numpy as np
import pandas as pd
from unittest.mock import Mock
from ctypes import cdll, byref, create_string_buffer
from ctypes import c_char_p, c_double, c_ushort, c_long, c_ulong
def _mock_edna():
# This function will mock all the methods that were used in the dna_dll.
# It's necessary so that documentation can be automatically created.
dna_dll = Mock()
attrs = {'DnaGetHistAvgUTC.return_value': c_ulong(1),
'DnaGetHistInterpUTC.return_value': c_ulong(1),
'DnaGetHistMinUTC.return_value': c_ulong(1),
'DnaGetHistMaxUTC.return_value': c_ulong(1),
'DnaGetHistSnapUTC.return_value': c_ulong(1),
'DnaGetHistRawUTC.return_value': c_ulong(1),
'DoesIdExist.return_value': c_ulong(1),
'DnaGetHSHistRawUTC.return_value': c_ulong(1),
'DnaGetNextHSHistUTC.return_value': c_ulong(1),
'DnaGetPointEntry.return_value': c_ulong(1),
'DnaGetNextPointEntry.return_value': c_ulong(1),
'DNAGetRTFull.return_value': c_ulong(1),
'DnaSelectPoint.return_value': c_ulong(1),
'StringToUTCTime.return_value': 1,
'DnaGetServiceEntry.return_value': c_ulong(1),
'DnaGetNextServiceEntry.return_value': c_ulong(1),
'DnaHistAppendValues.return_value': c_ulong(1),
'DnaHistUpdateInsertValues.return_value': c_ulong(1),
'DnaCancelHistRequest.return_value': None,
'DnaGetNextHistSmallUTC.return_value': c_ulong(1)}
dna_dll.configure_mock(**attrs)
return dna_dll
# This code should execute at the beginning of the module import, because
# all of the functions in this module require the dna_dll library to be
# loaded. See "LoadDll" if not in default location
default_location = "C:\\Program Files (x86)\\eDNA\\EzDnaApi64.dll"
if os.path.isfile(default_location):
dna_dll = cdll.LoadLibrary(default_location)
else:
warnings.warn("ERROR- no eDNA dll detected at " +
"C:\\Program Files (x86)\\eDNA\\EzDnaApi64.dll" +
" . Please manually load dll using the LoadDll function. " +
"Mocking dll, but all functions will fail until " +
"dll is manually loaded...")
dna_dll = _mock_edna()
# If the EzDnaApi file is not in the default location, the user must explicitly
# load it using the LoadDll function.
def LoadDll(location):
"""
If the EzDnaApi64.dll file is not in the default location
(C:\Program Files (x86)\eDNA\EzDnaApi64.dll) then the user must specify
the correct location of the file, before this module can be used.
:param location: the full location of EzDnaApi64.dll, including filename
"""
if os.path.isfile(location):
global dna_dll
dna_dll = cdll.LoadLibrary(location)
else:
raise Exception("ERROR- file does not exist at " + location)
def _format_str(text):
# Only allows a-z, 0-9, ., _, :, /, -, and spaces
if type(text) is str:
formatted_text = re.sub('[^-._:/\sA-Za-z0-9]+', '', text).strip()
return formatted_text
else:
return text
def DoesIDExist(tag_name):
"""
Determines if a fully-qualified site.service.tag eDNA tag exists
in any of the connected services.
:param tag_name: fully-qualified (site.service.tag) eDNA tag
:return: true if the point exists, false if the point does not exist
Example:
>>> DoesIDExist("Site.Service.Tag")
"""
# the eDNA API requires that the tag_name be specified in a binary format,
# and the ctypes library must be used to create a C++ variable type.
szPoint = c_char_p(tag_name.encode('utf-8'))
result = bool(dna_dll.DoesIdExist(szPoint))
return result
def GetHistAvg(tag_name, start_time, end_time, period,
desc_as_label=False, label=None):
"""
Retrieves data from eDNA history for a given tag. The data will be
averaged over the specified "period".
:param tag_name: fully-qualified (site.service.tag) eDNA tag
:param start_time: must be in format mm/dd/yy hh:mm:ss
:param end_time: must be in format mm/dd/yy hh:mm:ss
:param period: in units of seconds (e.g. 10)
:param desc_as_label: use the tag description as the column name instead
of the full tag
:param label: supply a custom label to use as the DataFrame column name
:return: a pandas DataFrame with timestamp, value, and status
"""
return GetHist(tag_name, start_time, end_time, mode="avg", period=period,
desc_as_label=desc_as_label, label=label)
def GetHistInterp(tag_name, start_time, end_time, period,
desc_as_label=False, label=None):
"""
Retrieves data from eDNA history for a given tag. The data will be
linearly interpolated over the specified "period".
:param tag_name: fully-qualified (site.service.tag) eDNA tag
:param start_time: must be in format mm/dd/yy hh:mm:ss
:param end_time: must be in format mm/dd/yy hh:mm:ss
:param period: in units of seconds (e.g. 10)
:param desc_as_label: use the tag description as the column name instead
of the full tag
:param label: supply a custom label to use as the DataFrame column name
:return: a pandas DataFrame with timestamp, value, and status
"""
return GetHist(tag_name, start_time, end_time, mode="interp",
period=period, desc_as_label=desc_as_label, label=label)
def GetHistMax(tag_name, start_time, end_time, period,
desc_as_label=False, label=None):
"""
Retrieves data from eDNA history for a given tag. The maximum of the data
will be found over the specified "period".
:param tag_name: fully-qualified (site.service.tag) eDNA tag
:param start_time: must be in format mm/dd/yy hh:mm:ss
:param end_time: must be in format mm/dd/yy hh:mm:ss
:param period: in units of seconds (e.g. 10)
:param desc_as_label: use the tag description as the column name instead
of the full tag
:param label: supply a custom label to use as the DataFrame column name
:return: a pandas DataFrame with timestamp, value, and status
"""
return GetHist(tag_name, start_time, end_time, mode="max",
period=period, desc_as_label=desc_as_label, label=label)
def GetHistMin(tag_name, start_time, end_time, period,
desc_as_label=False, label=None):
"""
Retrieves data from eDNA history for a given tag. The minimum of the data
will be found over the specified "period".
:param tag_name: fully-qualified (site.service.tag) eDNA tag
:param start_time: must be in format mm/dd/yy hh:mm:ss
:param end_time: must be in format mm/dd/yy hh:mm:ss
:param period: in units of seconds (e.g. 10)
:param desc_as_label: use the tag description as the column name instead
of the full tag
:param label: supply a custom label to use as the DataFrame column name
:return: a pandas DataFrame with timestamp, value, and status
"""
return GetHist(tag_name, start_time, end_time, mode="min",
period=period, desc_as_label=desc_as_label, label=label)
def GetHistRaw(tag_name, start_time, end_time, high_speed=False,
desc_as_label=False, label=None):
"""
Retrieves raw data from eDNA history for a given tag.
:param tag_name: fully-qualified (site.service.tag) eDNA tag
:param start_time: must be in format mm/dd/yy hh:mm:ss
:param end_time: must be in format mm/dd/yy hh:mm:ss
:param high_speed: true = pull milliseconds
:param desc_as_label: use the tag description as the column name instead
of the full tag
:param label: supply a custom label to use as the DataFrame column name
:return: a pandas DataFrame with timestamp, value, and status
"""
return GetHist(tag_name, start_time, end_time, mode="raw",
desc_as_label=desc_as_label, label=label)
def GetHistSnap(tag_name, start_time, end_time, period,
desc_as_label=False, label=None):
"""
Retrieves data from eDNA history for a given tag. The data will be
snapped to the last known value over intervals of the specified "period".
:param tag_name: fully-qualified (site.service.tag) eDNA tag
:param start_time: must be in format mm/dd/yy hh:mm:ss
:param end_time: must be in format mm/dd/yy hh:mm:ss
:param period: in units of seconds (e.g. 10)
:param desc_as_label: use the tag description as the column name instead
of the full tag
:param label: supply a custom label to use as the DataFrame column name
:return: a pandas DataFrame with timestamp, value, and status
"""
return GetHist(tag_name, start_time, end_time, mode="snap",
period=period, desc_as_label=desc_as_label, label=label)
def GetHist(tag_name, start_time, end_time, period=5, mode="raw",
desc_as_label=False, label=None, high_speed=False, utc=False):
"""
Retrieves data from eDNA history for a given tag.
:param tag_name: fully-qualified (site.service.tag) eDNA tag
:param start_time: must be in format mm/dd/yy hh:mm:ss
:param end_time: must be in format mm/dd/yy hh:mm:ss
:param period: specify the number of seconds for the pull interval
:param mode: "raw", "snap", "avg", "interp", "max", "min"
See eDNA documentation for more information.
:param desc_as_label: use the tag description as the column name instead
of the full tag
:param label: supply a custom label to use as the DataFrame column name
:param high_speed: if True, pull millisecond data
:param utc: if True, use the integer time format instead of DateTime
:return: a pandas DataFrame with timestamp, value, and status
"""
# Check if the point even exists
if not DoesIDExist(tag_name):
warnings.warn("WARNING- " + tag_name + " does not exist or " +
"connection was dropped. Try again if tag does exist.")
return pd.DataFrame()
# Define all required variables in the correct ctypes format
szPoint = c_char_p(tag_name.encode('utf-8'))
tStart = c_long(StringToUTCTime(start_time))
tEnd = c_long(StringToUTCTime(end_time))
tPeriod = c_long(period)
pulKey = c_ulong(0)
# Initialize the data pull using the specified pulKey, which is an
# identifier that tells eDNA which data pull is occurring
mode = mode.lower().strip()
if not high_speed:
if mode == "avg":
nRet = dna_dll.DnaGetHistAvgUTC(szPoint, tStart, tEnd, tPeriod, byref(pulKey))
if mode == "interp":
nRet = dna_dll.DnaGetHistInterpUTC(szPoint, tStart, tEnd, tPeriod, byref(pulKey))
if mode == "min":
nRet = dna_dll.DnaGetHistMinUTC(szPoint, tStart, tEnd, tPeriod, byref(pulKey))
if mode == "max":
nRet = dna_dll.DnaGetHistMaxUTC(szPoint, tStart, tEnd, tPeriod, byref(pulKey))
if mode == "snap":
nRet = dna_dll.DnaGetHistSnapUTC(szPoint, tStart, tEnd, tPeriod, byref(pulKey))
else:
nRet = dna_dll.DnaGetHistRawUTC(szPoint, tStart, tEnd, byref(pulKey))
time_, val, stat = _GetNextHistSmallUTC(pulKey, nRet)
else:
nStartMillis = c_ushort(0)
nEndMillis = c_ushort(0)
nRet = dna_dll.DnaGetHSHistRawUTC(szPoint, tStart, nStartMillis,
tEnd, nEndMillis, byref(pulKey))
time_, val, stat = _GetNextHSHistUTC(pulKey, nRet)
# The history request must be cancelled to free up network resources
dna_dll.DnaCancelHistRequest(pulKey)
# To construct the pandas DataFrame, the tag name will be used as the
# column name, and the index (which is in the strange eDNA format) must be
# converted to an actual DateTime
d = {tag_name + ' Status': stat, tag_name: val}
df = pd.DataFrame(data=d, index=time_)
if not utc:
if not high_speed:
df.index = pd.to_datetime(df.index, unit="s")
else:
df.index = pd.to_datetime(df.index, unit="ms")
if df.empty:
warnings.warn('WARNING- No data retrieved for ' + tag_name + '. ' +
'Check eDNA connection, ensure that the start time is ' +
'not later than the end time, verify that the ' +
'DateTime formatting matches eDNA requirements, and ' +
'check that data exists in the query time period.')
# Check if the user would rather use the description as the column name
if desc_as_label or label:
if label:
new_label = label
else:
new_label = _GetLabel(tag_name)
df.rename(inplace=True, columns={tag_name: new_label,
tag_name + " Status": new_label + " Status"})
return df
@numba.jit
def _GetNextHistSmallUTC(pulKey, nRet):
# This is a base function that iterates over a predefined history call,
# which may be raw, snap, max, min, etc.
pdValue, ptTime, pusStatus = c_double(-9999), c_long(-9999), c_ushort(0)
refVal, refTime, refStat = byref(pdValue), byref(ptTime), byref(pusStatus)
val = np.empty(0)
time_ = np.empty(0)
stat = np.empty(0)
# Once nRet is not zero, the function was terminated, either due to an
# error or due to the end of the data period.
nRet = dna_dll.DnaGetNextHistSmallUTC(pulKey, refVal, refTime, refStat)
while nRet == 0:
val = np.append(val, pdValue.value)
time_ = np.append(time_, ptTime.value)
stat = np.append(stat, pusStatus.value)
nRet = dna_dll.DnaGetNextHistSmallUTC(pulKey, refVal, refTime, refStat)
return time_, val, stat
@numba.jit
def _GetNextHSHistUTC(pulKey, nRet):
# This is a base function that iterates over a predefined history call,
# which may be raw, snap, max, min, etc.
pdValue, ptTime, pnMillis = c_double(-9999), c_long(-9999), c_ushort(0)
szStatus, nStatus = create_string_buffer(20), c_ushort(20)
refVal, refTime, refMillis = byref(pdValue), byref(ptTime), byref(pnMillis)
refStatus = byref(szStatus)
val = np.empty(0)
time_ = np.empty(0)
stat = np.empty(0)
# Once nRet is not zero, the function was terminated, either due to an
# error or due to the end of the data period.
nRet = dna_dll.DnaGetNextHSHistUTC(pulKey, refVal, refTime, refMillis,
refStatus, nStatus)
while nRet == 0:
val = np.append(val, pdValue.value)
time_ = np.append(time_, ptTime.value * 1000 + pnMillis.value)
stat = np.append(stat, 3)
nRet = dna_dll.DnaGetNextHSHistUTC(pulKey, refVal, refTime, refMillis,
refStatus, nStatus)
return time_, val, stat
def _GetLabel(tag_name):
# This function tries to get the tag description to use as the label for
# the variable in the pandas DataFrame. It removes any special characters
# and trims whitespace before and after. If the label is blank, the
# tag name will be returned again instead.
label = GetTagDescription(tag_name)
if label:
return label
else:
return tag_name
def GetMultipleTags(tag_list, start_time, end_time, sampling_rate=None,
fill_limit=99999, verify_time=False, desc_as_label=False,
utc=False):
"""
Retrieves raw data from eDNA history for multiple tags, merging them into
a single DataFrame, and resampling the data according to the specified
sampling_rate.
:param tag_list: a list of fully-qualified (site.service.tag) eDNA tags
:param start_time: must be in format mm/dd/yy hh:mm:ss
:param end_time: must be in format mm/dd/yy hh:mm:ss
:param sampling_rate: in units of seconds
:param fill_limit: in units of data points
:param verify_time: verify that the time is not before or after the query
:param desc_as_label: use the tag description as the column name instead
of the full tag
:param utc: if True, use the integer time format instead of DateTime
:return: a pandas DataFrame with timestamp and values
"""
# Since we are pulling data from multiple tags, let's iterate over each
# one. For this case, we only want to pull data using the "raw" method,
# which will obtain all data as it is actually stored in the historian.
dfs = []
columns_names = []
for tag in tag_list:
df = GetHist(tag, start_time, end_time, utc=utc)
if not df.empty:
# Sometimes a duplicate index/value pair is retrieved from
# eDNA, which will cause the concat to fail if not removed
# df.drop_duplicates(inplace=True)
df = df[~df.index.duplicated(keep='first')]
# If the user wants to use descriptions as labels, we need to
# ensure that only unique labels are used
label = tag
if desc_as_label:
orig_label = _GetLabel(tag)
label = orig_label
rename_number = 2
while label in columns_names:
label = orig_label + str(rename_number)
rename_number += 1
columns_names.append(label)
df.rename(columns={tag: label}, inplace=True)
# Add the DataFrame to the list, to be concatenated later
dfs.append(pd.DataFrame(df[label]))
# Next, we concatenate all the DataFrames using an outer join (default).
# Verify integrity is slow, but it ensures that the concatenation
# worked correctly.
if dfs:
merged_df = pd.concat(dfs, axis=1, verify_integrity=True)
merged_df = merged_df.fillna(method="ffill", limit=fill_limit)
else:
warnings.warn('WARNING- No data retrieved for any tags. ' +
'Check eDNA connection, ensure that the start time is ' +
'not later than the end time, verify that the ' +
'DateTime formatting matches eDNA requirements, and ' +
'check that data exists in the query time period.')
return pd.DataFrame()
# eDNA sometimes pulls data too early or too far- let's filter out all
# the data that is not within our original criteria.
if verify_time:
start_np = pd.to_datetime(start_time)
end_np = pd.to_datetime(end_time)
mask = (merged_df.index > start_np) & (merged_df.index <= end_np)
merged_df = merged_df.loc[mask]
# Finally, we resample the data at the rate requested by the user.
if sampling_rate:
sampling_string = str(sampling_rate) + "S"
merged_df = merged_df.resample(sampling_string).fillna(
method="ffill", limit=fill_limit)
return merged_df
def _FormatPoints(szPoint, pdValue, szTime, szStatus, szDesc, szUnits):
# Returns an array of properly-formatted points from the GetPoints function
tag = _format_str(szPoint.value.decode(errors='ignore'))
value = pdValue.value
time_ = _format_str(szTime.value.decode(errors='ignore'))
status = _format_str(szStatus.value.decode(errors='ignore'))
desc = _format_str(szDesc.value.decode(errors='ignore'))
units = _format_str(szUnits.value.decode(errors='ignore'))
if szPoint.value.strip():
return [tag, value, time_, status, desc, units]
def GetPoints(edna_service):
"""
Obtains all the points in the edna_service, including real-time values.
:param edna_service: The full Site.Service name of the eDNA service.
:return: A pandas DataFrame of points in the form [Tag, Value, Time,
Description, Units]
"""
# Define all required variables in the correct ctypes format
szServiceName = c_char_p(edna_service.encode('utf-8'))
nStarting, pulKey, pdValue = c_ushort(0), c_ulong(0), c_double(-9999)
szPoint, szTime = create_string_buffer(30), create_string_buffer(30)
szStatus, szDesc = create_string_buffer(20), create_string_buffer(90)
szUnits = create_string_buffer(20)
szPoint2, szTime2 = create_string_buffer(30), create_string_buffer(30)
szStatus2, szDesc2 = create_string_buffer(20), create_string_buffer(90)
szUnits2, pdValue2 = create_string_buffer(20), c_double(-9999)
nPoint, nTime, nStatus = c_ushort(30), c_ushort(30), c_ushort(20)
nDesc, nUnits = c_ushort(90), c_ushort(20)
# Call the eDNA function. nRet is zero if the function is successful.
points = []
nRet = dna_dll.DnaGetPointEntry(szServiceName, nStarting, byref(pulKey),
byref(szPoint), nPoint, byref(pdValue), byref(szTime), nTime,
byref(szStatus), nStatus, byref(szDesc), nDesc, byref(szUnits), nUnits)
tag = _FormatPoints(szPoint, pdValue, szTime, szStatus, szDesc, szUnits)
if tag:
points.append(tag)
# Iterate across all the returned services
while nRet == 0:
nRet = dna_dll.DnaGetNextPointEntry(pulKey, byref(szPoint2), nPoint,
byref(pdValue2), byref(szTime2), nTime, byref(szStatus2), nStatus,
byref(szDesc2), nDesc, byref(szUnits2), nUnits)
# We want to ensure only UTF-8 characters are returned. Ignoring
# characters is slightly unsafe, but they should only occur in the
# units or description, so it's not a huge issue.
tag = _FormatPoints(szPoint2, pdValue2, szTime2, szStatus2, szDesc2,
szUnits2)
if tag:
points.append(tag)
# If no results were returned, raise a warning
df = pd.DataFrame()
if points:
df = pd.DataFrame(points, columns=["Tag", "Value", "Time", "Status",
"Description", "Units"])
else:
warnings.warn("WARNING- No points were returned. Check that the " +
"service exists and contains points.")
return df
def GetRTFull(tag_name):
"""
Gets current information about a point configured in a real-time
eDNA service, including current value, time, status, description,
and units.
:param tag_name: fully-qualified (site.service.tag) eDNA tag
:return: tuple of: alue, time, status, statusint, description, units
"""
# Check if the point even exists
if not DoesIDExist(tag_name):
warnings.warn("WARNING- " + tag_name + " does not exist or " +
"connection was dropped. Try again if tag does exist.")
return None
# Define all required variables in the correct ctypes format
szPoint = c_char_p(tag_name.encode('utf-8'))
pdValue, ptTime = c_double(-9999), c_long(-9999)
szValue, szTime = create_string_buffer(20), create_string_buffer(20)
szStatus, szDesc = create_string_buffer(20), create_string_buffer(20)
szUnits = create_string_buffer(20)
nValue, nTime, nStatus = c_ushort(20), c_ushort(20), c_ushort(20)
pusStatus, nDesc, nUnits = c_ushort(0), c_ushort(0), c_ushort(0)
# Call the eDNA function. nRet is zero if the function is successful
nRet = dna_dll.DNAGetRTFull(szPoint, byref(pdValue), byref(szValue),
nValue, byref(ptTime), byref(szTime), nTime, byref(pusStatus),
byref(szStatus), nStatus, byref(szDesc), nDesc, byref(szUnits), nUnits)
# Check to make sure the function returned correctly. If not, return None
if nRet == 0:
return ([pdValue.value, szTime.value.decode('utf-8'),
szStatus.value.decode('utf-8'), pusStatus.value,
szDesc.value.decode('utf-8'), szUnits.value.decode('utf-8')])
else:
warnings.warn("WARNING- eDNA API failed with code " + str(nRet))
return None
def _FormatServices(szSvcName, szSvcDesc, szSvcType, szSvcStat):
# Returns an array of properly-formatted services from the
# GetServices function
name = _format_str(szSvcName.value.decode(errors='ignore'))
desc = _format_str(szSvcDesc.value.decode(errors='ignore'))
type_ = _format_str(szSvcType.value.decode(errors='ignore'))
status = _format_str(szSvcStat.value.decode(errors='ignore'))
if name:
return [name, desc, type_, status]
def GetServices():
"""
Obtains all the connected eDNA services.
:return: A pandas DataFrame of connected eDNA services in the form [Name,
Description, Type, Status]
"""
# Define all required variables in the correct ctypes format
pulKey = c_ulong(0)
szType = c_char_p("".encode('utf-8'))
szStartSvcName = c_char_p("".encode('utf-8'))
szSvcName, szSvcDesc = create_string_buffer(30), create_string_buffer(90)
szSvcType, szSvcStat = create_string_buffer(30), create_string_buffer(30)
szSvcName2, szSvcDesc2 = create_string_buffer(30), create_string_buffer(90)
szSvcType2, szSvcStat2 = create_string_buffer(30), create_string_buffer(30)
nSvcName, nSvcDesc = c_ushort(30), c_ushort(90)
nSvcType, nSvcStat = c_ushort(30), c_ushort(30)
# Call the eDNA function. nRet is zero if the function is successful.
services = []
nRet = dna_dll.DnaGetServiceEntry(szType, szStartSvcName, byref(pulKey),
byref(szSvcName), nSvcName, byref(szSvcDesc), nSvcDesc,
byref(szSvcType), nSvcType, byref(szSvcStat), nSvcStat)
serv = _FormatServices(szSvcName, szSvcDesc, szSvcType, szSvcStat)
if serv:
services.append(serv)
# Iterate across all the returned services
while nRet == 0:
nRet = dna_dll.DnaGetNextServiceEntry(pulKey,
byref(szSvcName2), nSvcName, byref(szSvcDesc2), nSvcDesc,
byref(szSvcType2), nSvcType, byref(szSvcStat2), nSvcStat)
# We want to ensure only UTF-8 characters are returned. Ignoring
# characters is slightly unsafe, but they should only occur in the
# units or description, so it's not a huge issue.
serv = _FormatServices(szSvcName2, szSvcDesc2, szSvcType2, szSvcStat2)
if serv:
services.append(serv)
# If no results were returned, raise a warning
df = pd.DataFrame()
if services:
df = pd.DataFrame(services, columns=["Name", "Description", "Type",
"Status"])
else:
warnings.warn("WARNING- No connected eDNA services detected. Check " +
"your DNASys.ini file and your network connection.")
return df
def GetTagDescription(tag_name):
"""
Gets the current description of a point configured in a real-time eDNA
service.
:param tag_name: fully-qualified (site.service.tag) eDNA tag
:return: tag description
"""
# Check if the point even exists
if not DoesIDExist(tag_name):
warnings.warn("WARNING- " + tag_name + " does not exist or " +
"connection was dropped. Try again if tag does exist.")
return None
# To get the point information for the service, we need the Site.Service
split_tag = tag_name.split(".")
# If the full Site.Service.Tag was not supplied, return the tag_name
if len(split_tag) < 3:
warnings.warn("WARNING- Please supply the full Site.Service.Tag.")
return tag_name
# The Site.Service will be the first two split strings
site_service = split_tag[0] + "." + split_tag[1]
# GetPoints will return a DataFrame with point information
points = GetPoints(site_service)
if tag_name in points.Tag.values:
description = points[points.Tag == tag_name].Description.values[0]
if description:
return description
else:
return tag_name
else:
warnings.warn("WARNING- " + tag_name + " not found in service.")
return None
def HistAppendValues(site_service, tag_name, times, values, statuses):
"""
Appends a value to an eDNA history service. Take very careful note of the
following required parameters. Any deviation from this exact format WILL
cause the function to fail.
This function will append values to history, only if they are LATER than
the current time of the last written data point. If this is not true, no
data will be appended.
This value is strongly preferred over HistUpdateInsertValues, which will
slow down data retrieval if it is used too often.
:param site_service: This is the history service for the eDNA tag, NOT
the site.service of the tag itself. For instance,
ANTARES.HISTORY, not ANTARES.ANVCALC
:param tag_name: This is the full site.service.tag. For instance,
ANTARES.ANVCALC.ADE1CA02
:param times: This is a Python array of times in UTC Epoch format.
For example, "1483926416" not "2016/01/01 01:01:01".
This must be an array.
:param values: A Python array of data point values for each times.
:param statuses: The status of the point. Refer to eDNA documentation
for more information. Usually use '3', which is 'OK'.
"""
# Define all required variables in the correct ctypes format
szService = c_char_p(site_service.encode('utf-8'))
szPoint = c_char_p(tag_name.encode('utf-8'))
nCount = c_ushort(1)
# Iterate over each user-supplied data point
for dttime, value, status in zip(times, values, statuses):
# Define all required variables in the correct ctypes format
PtTimeList = c_long(dttime)
PusStatusList = c_ushort(status)
PszValueList = c_char_p(str(value).encode('utf-8'))
szError = create_string_buffer(20)
nError = c_ushort(20)
# Call the history append file
nRet = dna_dll.DnaHistAppendValues(szService, szPoint,
nCount, byref(PtTimeList), byref(PusStatusList),
byref(PszValueList), byref(szError), nError)
def HistUpdateInsertValues(site_service, tag_name, times, values, statuses):
    """
    CAUTION- Use HistAppendValues instead of this function, unless you know
    what you are doing.

    Inserts values into an eDNA history service. Take very careful note of
    the following required parameters. Any deviation from this exact format
    WILL cause the function to fail.

    :param site_service: This is the history service for the eDNA tag, NOT
                         the site.service of the tag itself. For instance,
                         ANTARES.HISTORY, not ANTARES.ANVCALC
    :param tag_name: This is the full site.service.tag. For instance,
                     ANTARES.ANVCALC.ADE1CA02
    :param times: This is a Python array of times in UTC Epoch format.
                  For example, "1483926416" not "2016/01/01 01:01:01".
                  This must be an array.
    :param values: A Python array of data point values for each times.
    :param statuses: The status of the point. Refer to eDNA documentation
                     for more information. Usually use '3', which is 'OK'.
    :return: a list of dll return codes, one per inserted point (0 means
             success). The codes were previously discarded, so callers that
             ignore the return value are unaffected.
    """
    # Define the per-call constants in the correct ctypes format
    szService = c_char_p(site_service.encode('utf-8'))
    szPoint = c_char_p(tag_name.encode('utf-8'))
    nCount = c_ushort(1)
    return_codes = []
    # Iterate over each user-supplied data point, pushing one at a time
    for dttime, value, status in zip(times, values, statuses):
        # Each point needs fresh ctypes values and an error buffer
        PtTimeList = c_long(dttime)
        PusStatusList = c_ushort(status)
        PszValueList = c_char_p(str(value).encode('utf-8'))
        szError = create_string_buffer(20)
        nError = c_ushort(20)
        # Call the history update/insert function; 0 indicates success.
        # Collect the return codes instead of silently discarding them.
        nRet = dna_dll.DnaHistUpdateInsertValues(szService, szPoint,
            nCount, byref(PtTimeList), byref(PusStatusList),
            byref(PszValueList), byref(szError), nError)
        return_codes.append(nRet)
    return return_codes
def SelectPoint():
    """
    Opens an eDNA point picker, where the user can select a single tag.

    :return: selected tag name
    """
    # Buffer that the point picker dialog fills with the chosen tag name;
    # the dll is told the buffer size through nPoint
    buffer_size = 20
    pszPoint = create_string_buffer(buffer_size)
    nPoint = c_ushort(buffer_size)
    # Show the picker; the selection is written into pszPoint in place
    dna_dll.DnaSelectPoint(byref(pszPoint), nPoint)
    return pszPoint.value.decode('utf-8')
def StringToUTCTime(time_string):
    """
    Turns a DateTime string into UTC time.

    :param time_string: Must be the format "MM/dd/yy hh:mm:ss"
    :return: an integer representing the UTC int format
    """
    # The eDNA API expects a C string, so encode before handing it over
    return dna_dll.StringToUTCTime(c_char_p(time_string.encode('utf-8')))
# Import-time sanity check: at least one eDNA service should be reachable,
# otherwise there is a problem with the eDNA connection.
service_array = GetServices()
num_services = 0
if not service_array.empty:
    num_services = str(len(service_array))
    print("Successfully connected to " + num_services + " eDNA services.")
# Drop the module-level temporaries so they do not pollute the namespace
del service_array, num_services, default_location
|
drericstrong/pyedna | pyedna/serv.py | AddAnalogShortIdRecordNoStatus | python | def AddAnalogShortIdRecordNoStatus(site_service, tag, time_value, value):
# Define all required variables in the correct ctypes format
szService = c_char_p(site_service.encode('utf-8'))
szPointId = c_char_p(tag.encode('utf-8'))
tTime = c_long(int(time_value))
dValue = c_double(value)
# Try to push the data. Function will return 0 if successful.
nRet = dnaserv_dll.DnaAddAnalogShortIdRecordNoStatus(szService, szPointId,
tTime, dValue)
return nRet | This function will add an analog value to the specified eDNA service and
tag, without an associated point status.
:param site_service: The site.service where data will be pushed
:param tag: The eDNA tag to push data. Tag only (e.g. ADE1CA01)
:param time_value: The time of the point, which MUST be in UTC Epoch
format. For example, "1483926416" not "2016/01/01 01:01:01".
:param value: The value associated with the above time.
:return: 0, if the data push is successful | train | https://github.com/drericstrong/pyedna/blob/b8f8f52def4f26bb4f3a993ce3400769518385f6/pyedna/serv.py#L109-L130 | null | # -*- coding: utf-8 -*-
"""
pyedna.serv
-------------
This module contains functions within the EzDnaServApi, mainly used for
direct interaction with eDNA services, such as pushing data in real-time.
:copyright: (c) 2017 Eric Strong.
:license: Refer to LICENSE.txt for more information.
"""
import os
import warnings
import pyedna.ezdna as dna
from unittest.mock import Mock
from ctypes import cdll, byref, create_string_buffer
from ctypes import c_char_p, c_double, c_short, c_ushort, c_long, c_int
def _mock_edna():
    """Return a Mock standing in for the eDNA dll.

    Allows the module to be imported (e.g. for automatic documentation
    builds) on machines without the dll; every mocked call returns 1.
    """
    mocked = Mock()
    mocked.configure_mock(**{
        'DnaAddAnalogShortIdRecord.return_value': 1,
        'DnaAddAnalogShortIdRecordNoStatus.return_value': 1,
        'DnaAddDigitalShortIdRecord.return_value': 1,
        'DnaAddAnalogShortIdMsecRecord.return_value': 1,
        'DnaAddAnalogShortIdMsecRecordNoStatus.return_value': 1,
        'DnaAddDigitalShortIdMsecRecord.return_value': 1,
        'DnaFlushShortIdRecords.return_value': 1})
    return mocked
# This code should execute at the beginning of the module import, because
# all of the functions in this module require the dna_dll library to be
# loaded. See "LoadDll" if not in default location
# NOTE(review): the default path below points at EzDnaApi64.dll, while the
# LoadDll docstring refers to EZDnaServApi64.dll -- confirm which dll the
# serv functions actually require.
default_location = "C:\\Program Files (x86)\\eDNA\\EzDnaApi64.dll"
if os.path.isfile(default_location):
    # Load the real eDNA API dll when it is present
    dnaserv_dll = cdll.LoadLibrary(default_location)
else:
    # Fall back to a Mock so the module can still be imported; all dll
    # calls return dummy values until LoadDll is invoked manually
    warnings.warn("ERROR- no eDNA dll detected at " +
                  "C:\\Program Files (x86)\\eDNA\\EzDnaApi64.dll" +
                  " . Please manually load dll using the LoadDll function. " +
                  "Mocking dll, but all functions will fail until " +
                  "dll is manually loaded...")
    dnaserv_dll = _mock_edna()
def LoadDll(location):
    """
    If the EZDnaServApi64.dll file is not in the default location
    (C:/Program Files (x86)/eDNA/EZDnaServApi64.dll) then the user must
    specify the correct location of the file, before this module can be used.

    :param location: full location of EZDnaServApi64.dll, including filename
    :raises Exception: if no file exists at the given location
    """
    # Guard clause: refuse to load anything that is not an existing file
    if not os.path.isfile(location):
        raise Exception("ERROR- file does not exist at " + location)
    global dnaserv_dll
    dnaserv_dll = cdll.LoadLibrary(location)
def AddAnalogShortIdRecord(site_service, tag, time_value, value,
        low_warn=False, high_warn=False, low_alarm=False, high_alarm=False,
        oor_low=False, oor_high=False, unreliable=False, manual=False):
    """
    Push an analog value to the specified eDNA service and tag, with many
    optional status definitions.

    :param site_service: The site.service where data will be pushed
    :param tag: The eDNA tag to push data. Tag only (e.g. ADE1CA01)
    :param time_value: The time of the point, which MUST be in UTC Epoch
        format. For example, "1483926416" not "2016/01/01 01:01:01".
    :param value: The value associated with the above time.
    :param low_warn: TRUE if the point is in a low warning state
    :param high_warn: TRUE if the point is in a high warning state
    :param low_alarm: TRUE if the point is in a low alarm state
    :param high_alarm: TRUE if the point is in a high alarm state
    :param oor_low: TRUE if the point is out-of-range low
    :param oor_high: TRUE if the point is out-of-range high
    :param unreliable: TRUE if the point is unreliable
    :param manual: TRUE if the point is manually set
    :return: 0, if the data push is successful
    """
    # Marshal the scalar arguments into their ctypes equivalents
    szService = c_char_p(site_service.encode('utf-8'))
    szPointId = c_char_p(tag.encode('utf-8'))
    tTime = c_long(int(time_value))
    dValue = c_double(value)
    # The dll takes every status flag as an integer boolean, in this order
    flags = [c_int(int(flag)) for flag in
             (low_warn, high_warn, low_alarm, high_alarm,
              oor_low, oor_high, unreliable, manual)]
    # A return code of 0 indicates a successful push
    return dnaserv_dll.DnaAddAnalogShortIdRecord(
        szService, szPointId, tTime, dValue, *flags)
def AddDigitalShortIdRecord(site_service, tag, time_value, value,
        status_string="OK ", warn=False, chattering=False,
        unreliable=False, manual=False):
    """
    This function will add a digital value to the specified eDNA service and
    tag, including all default point status definitions.

    :param site_service: The site.service where data will be pushed
    :param tag: The eDNA tag to push data. Tag only (e.g. ADE1CA01)
    :param time_value: The time of the point, which MUST be in UTC Epoch
        format. For example, "1483926416" not "2016/01/01 01:01:01".
    :param value: should be either TRUE or FALSE
    :param status_string: the status label; strings shorter than the 16
        characters the dll expects are right-padded with spaces
    :param warn: TRUE if the point is in a warning state
    :param chattering: TRUE if the point is in a chattering state
    :param unreliable: TRUE if the point is in an unreliable state
    :param manual: TRUE if the point was manually set
    :return: 0, if the data push is successful
    """
    # Define all required variables in the correct ctypes format
    szService = c_char_p(site_service.encode('utf-8'))
    szPointId = c_char_p(tag.encode('utf-8'))
    tTime = c_long(int(time_value))
    # FIX (resolves the old TODO): the dll requires a status string of
    # exactly 16 characters, so pad short strings instead of passing them
    # through unchecked. Strings of 16+ characters are left unchanged.
    szStatus = create_string_buffer(status_string.ljust(16).encode('utf-8'))
    bSet = c_int(int(value))
    bDigitalWarning = c_int(int(warn))
    bDigitalChattering = c_int(int(chattering))
    bUnreliable = c_int(int(unreliable))
    bManual = c_int(int(manual))
    # Try to push the data. Function will return 0 if successful.
    nRet = dnaserv_dll.DnaAddDigitalShortIdRecord(szService, szPointId,
        tTime, bSet, szStatus, bDigitalWarning, bDigitalChattering,
        bUnreliable, bManual)
    return nRet
def AddAnalogShortIdMsecRecord(site_service, tag, time_value, msec, value,
        low_warn=False, high_warn=False, low_alarm=False, high_alarm=False,
        oor_low=False, oor_high=False, unreliable=False, manual=False):
    """
    Push an analog value with millisecond resolution to the specified eDNA
    service and tag, with many optional status definitions.

    :param site_service: The site.service where data will be pushed
    :param tag: The eDNA tag to push data. Tag only (e.g. ADE1CA01)
    :param time_value: The time of the point, which MUST be in UTC Epoch
        format. For example, "1483926416" not "2016/01/01 01:01:01".
    :param msec: The additional milliseconds for the time_value
    :param value: The value associated with the above time.
    :param low_warn: TRUE if the point is in a low warning state
    :param high_warn: TRUE if the point is in a high warning state
    :param low_alarm: TRUE if the point is in a low alarm state
    :param high_alarm: TRUE if the point is in a high alarm state
    :param oor_low: TRUE if the point is out-of-range low
    :param oor_high: TRUE if the point is out-of-range high
    :param unreliable: TRUE if the point is unreliable
    :param manual: TRUE if the point is manually set
    :return: 0, if the data push is successful
    """
    # Marshal the scalar arguments into their ctypes equivalents
    szService = c_char_p(site_service.encode('utf-8'))
    szPointId = c_char_p(tag.encode('utf-8'))
    tTime = c_long(int(time_value))
    dValue = c_double(value)
    # The dll takes every status flag as an integer boolean, in this order
    flags = [c_int(int(flag)) for flag in
             (low_warn, high_warn, low_alarm, high_alarm,
              oor_low, oor_high, unreliable, manual)]
    usMsec = c_ushort(msec)
    # A return code of 0 indicates a successful push
    return dnaserv_dll.DnaAddAnalogShortIdMsecRecord(
        szService, szPointId, tTime, dValue, *flags, usMsec)
def AddAnalogShortIdMsecRecordNoStatus(site_service, tag, time_value, msec,
                                       value):
    """
    Push an analog value with millisecond resolution to the specified eDNA
    service and tag, without an associated point status.

    :param site_service: The site.service where data will be pushed
    :param tag: The eDNA tag to push data. Tag only (e.g. ADE1CA01)
    :param time_value: The time of the point, which MUST be in UTC Epoch
        format. For example, "1483926416" not "2016/01/01 01:01:01".
    :param msec: The additional milliseconds for the time_value
    :param value: The value associated with the above time.
    :return: 0, if the data push is successful
    """
    # Marshal the arguments inline and hand them to the dll; a return
    # code of 0 indicates a successful push
    return dnaserv_dll.DnaAddAnalogShortIdMsecRecordNoStatus(
        c_char_p(site_service.encode('utf-8')),
        c_char_p(tag.encode('utf-8')),
        c_long(int(time_value)),
        c_double(value),
        c_ushort(msec))
def AddDigitalShortIdMsecRecord(site_service, tag, time_value, msec,
        value, status_string="OK ", warn=False, chattering=False,
        unreliable=False, manual=False):
    """
    This function will add a digital value to the specified eDNA service and
    tag, with millisecond resolution, including all default point status
    definitions.

    :param site_service: The site.service where data will be pushed
    :param tag: The eDNA tag to push data. Tag only (e.g. ADE1CA01)
    :param time_value: The time of the point, which MUST be in UTC Epoch
        format. For example, "1483926416" not "2016/01/01 01:01:01".
    :param msec: The additional milliseconds for the time_value
    :param value: should be either TRUE or FALSE
    :param status_string: the status label; strings shorter than the 16
        characters the dll expects are right-padded with spaces
    :param warn: TRUE if the point is in a warning state
    :param chattering: TRUE if the point is in a chattering state
    :param unreliable: TRUE if the point is in an unreliable state
    :param manual: TRUE if the point was manually set
    :return: 0, if the data push is successful
    """
    # Define all required variables in the correct ctypes format
    szService = c_char_p(site_service.encode('utf-8'))
    szPointId = c_char_p(tag.encode('utf-8'))
    tTime = c_long(int(time_value))
    # FIX: the dll requires a status string of exactly 16 characters, so
    # pad short strings (including the default) instead of passing them
    # through unchecked. Strings of 16+ characters are left unchanged.
    szStatus = create_string_buffer(status_string.ljust(16).encode('utf-8'))
    bSet = c_int(int(value))
    bDigitalWarning = c_int(int(warn))
    bDigitalChattering = c_int(int(chattering))
    bUnreliable = c_int(int(unreliable))
    bManual = c_int(int(manual))
    usMsec = c_ushort(msec)
    # Try to push the data. Function will return 0 if successful.
    nRet = dnaserv_dll.DnaAddDigitalShortIdMsecRecord(szService, szPointId,
        tTime, bSet, szStatus, bDigitalWarning, bDigitalChattering,
        bUnreliable, bManual, usMsec)
    return nRet
def FlushShortIdRecords(site_service):
    """
    Flush all the queued records.

    :param site_service: The site.service where data was pushed
    :return: message whether function was successful (the dll return code
             concatenated with the dll's status message)
    """
    # Define all required variables in the correct ctypes format
    szService = c_char_p(site_service.encode('utf-8'))
    # BUG FIX: the buffer must be as large as the size reported to the dll
    # (nMessage = 20). The old code allocated only a 2-byte buffer from
    # b" ", inviting a buffer overrun when the dll wrote its message.
    szMessage = create_string_buffer(20)
    nMessage = c_ushort(20)
    # Try to flush the data. Function will return message regarding success.
    nRet = dnaserv_dll.DnaFlushShortIdRecords(szService, byref(szMessage),
                                              nMessage)
    return str(nRet) + szMessage.value.decode('utf-8')
# Import-time sanity check: at least one eDNA service should be reachable,
# otherwise there is a problem with the eDNA connection.
service_array = dna.GetServices()
num_services = 0
if not service_array.empty:
    num_services = str(len(service_array))
    print("Successfully connected to " + num_services + " eDNA services.")
# Drop the module-level temporaries so they do not pollute the namespace
del service_array, num_services, default_location
|
drericstrong/pyedna | pyedna/serv.py | AddDigitalShortIdRecord | python | def AddDigitalShortIdRecord(site_service, tag, time_value, value,
status_string="OK ", warn=False, chattering=False,
unreliable=False, manual=False):
# Define all required variables in the correct ctypes format
szService = c_char_p(site_service.encode('utf-8'))
szPointId = c_char_p(tag.encode('utf-8'))
tTime = c_long(int(time_value))
# TODO- check if the string is exactly 16 characters and convert
szStatus = create_string_buffer(status_string.encode('utf-8'))
bSet = c_int(int(value))
bDigitalWarning = c_int(int(warn))
bDigitalChattering = c_int(int(chattering))
bUnreliable = c_int(int(unreliable))
bManual = c_int(int(manual))
# Try to push the data. Function will return 0 if successful.
nRet = dnaserv_dll.DnaAddDigitalShortIdRecord(szService, szPointId,
tTime, bSet, szStatus, bDigitalWarning, bDigitalChattering,
bUnreliable, bManual)
return nRet | This function will add a digital value to the specified eDNA service and
tag, including all default point status definitions.
:param site_service: The site.service where data will be pushed
:param tag: The eDNA tag to push data. Tag only (e.g. ADE1CA01)
:param time_value: The time of the point, which MUST be in UTC Epoch
format. For example, "1483926416" not "2016/01/01 01:01:01".
:param value: should be either TRUE or FALSE
:param status_string: a string that must be EXACTLY 16 characters
:param warn: TRUE if the point is in a warning state
:param chattering: TRUE if the point is in a chattering state
:param unreliable: TRUE if the point is in an unreliable state
:param manual: TRUE if the point was manually set
:return: 0, if the data push is successful | train | https://github.com/drericstrong/pyedna/blob/b8f8f52def4f26bb4f3a993ce3400769518385f6/pyedna/serv.py#L133-L168 | null | # -*- coding: utf-8 -*-
"""
pyedna.serv
-------------
This module contains functions within the EzDnaServApi, mainly used for
direct interaction with eDNA services, such as pushing data in real-time.
:copyright: (c) 2017 Eric Strong.
:license: Refer to LICENSE.txt for more information.
"""
import os
import warnings
import pyedna.ezdna as dna
from unittest.mock import Mock
from ctypes import cdll, byref, create_string_buffer
from ctypes import c_char_p, c_double, c_short, c_ushort, c_long, c_int
def _mock_edna():
    """Return a Mock standing in for the eDNA dll.

    Allows the module to be imported (e.g. for automatic documentation
    builds) on machines without the dll; every mocked call returns 1.
    """
    mocked = Mock()
    mocked.configure_mock(**{
        'DnaAddAnalogShortIdRecord.return_value': 1,
        'DnaAddAnalogShortIdRecordNoStatus.return_value': 1,
        'DnaAddDigitalShortIdRecord.return_value': 1,
        'DnaAddAnalogShortIdMsecRecord.return_value': 1,
        'DnaAddAnalogShortIdMsecRecordNoStatus.return_value': 1,
        'DnaAddDigitalShortIdMsecRecord.return_value': 1,
        'DnaFlushShortIdRecords.return_value': 1})
    return mocked
# This code should execute at the beginning of the module import, because
# all of the functions in this module require the dna_dll library to be
# loaded. See "LoadDll" if not in default location
# NOTE(review): the default path below points at EzDnaApi64.dll, while the
# LoadDll docstring refers to EZDnaServApi64.dll -- confirm which dll the
# serv functions actually require.
default_location = "C:\\Program Files (x86)\\eDNA\\EzDnaApi64.dll"
if os.path.isfile(default_location):
    # Load the real eDNA API dll when it is present
    dnaserv_dll = cdll.LoadLibrary(default_location)
else:
    # Fall back to a Mock so the module can still be imported; all dll
    # calls return dummy values until LoadDll is invoked manually
    warnings.warn("ERROR- no eDNA dll detected at " +
                  "C:\\Program Files (x86)\\eDNA\\EzDnaApi64.dll" +
                  " . Please manually load dll using the LoadDll function. " +
                  "Mocking dll, but all functions will fail until " +
                  "dll is manually loaded...")
    dnaserv_dll = _mock_edna()
def LoadDll(location):
    """
    If the EZDnaServApi64.dll file is not in the default location
    (C:/Program Files (x86)/eDNA/EZDnaServApi64.dll) then the user must
    specify the correct location of the file, before this module can be used.

    :param location: full location of EZDnaServApi64.dll, including filename
    :raises Exception: if no file exists at the given location
    """
    # Guard clause: refuse to load anything that is not an existing file
    if not os.path.isfile(location):
        raise Exception("ERROR- file does not exist at " + location)
    global dnaserv_dll
    dnaserv_dll = cdll.LoadLibrary(location)
def AddAnalogShortIdRecord(site_service, tag, time_value, value,
        low_warn=False, high_warn=False, low_alarm=False, high_alarm=False,
        oor_low=False, oor_high=False, unreliable=False, manual=False):
    """
    Push an analog value to the specified eDNA service and tag, with many
    optional status definitions.

    :param site_service: The site.service where data will be pushed
    :param tag: The eDNA tag to push data. Tag only (e.g. ADE1CA01)
    :param time_value: The time of the point, which MUST be in UTC Epoch
        format. For example, "1483926416" not "2016/01/01 01:01:01".
    :param value: The value associated with the above time.
    :param low_warn: TRUE if the point is in a low warning state
    :param high_warn: TRUE if the point is in a high warning state
    :param low_alarm: TRUE if the point is in a low alarm state
    :param high_alarm: TRUE if the point is in a high alarm state
    :param oor_low: TRUE if the point is out-of-range low
    :param oor_high: TRUE if the point is out-of-range high
    :param unreliable: TRUE if the point is unreliable
    :param manual: TRUE if the point is manually set
    :return: 0, if the data push is successful
    """
    # Marshal the scalar arguments into their ctypes equivalents
    szService = c_char_p(site_service.encode('utf-8'))
    szPointId = c_char_p(tag.encode('utf-8'))
    tTime = c_long(int(time_value))
    dValue = c_double(value)
    # The dll takes every status flag as an integer boolean, in this order
    flags = [c_int(int(flag)) for flag in
             (low_warn, high_warn, low_alarm, high_alarm,
              oor_low, oor_high, unreliable, manual)]
    # A return code of 0 indicates a successful push
    return dnaserv_dll.DnaAddAnalogShortIdRecord(
        szService, szPointId, tTime, dValue, *flags)
def AddAnalogShortIdRecordNoStatus(site_service, tag, time_value, value):
    """
    Push an analog value to the specified eDNA service and tag, without an
    associated point status.

    :param site_service: The site.service where data will be pushed
    :param tag: The eDNA tag to push data. Tag only (e.g. ADE1CA01)
    :param time_value: The time of the point, which MUST be in UTC Epoch
        format. For example, "1483926416" not "2016/01/01 01:01:01".
    :param value: The value associated with the above time.
    :return: 0, if the data push is successful
    """
    # Marshal the arguments inline and hand them to the dll; a return
    # code of 0 indicates a successful push
    return dnaserv_dll.DnaAddAnalogShortIdRecordNoStatus(
        c_char_p(site_service.encode('utf-8')),
        c_char_p(tag.encode('utf-8')),
        c_long(int(time_value)),
        c_double(value))
def AddAnalogShortIdMsecRecord(site_service, tag, time_value, msec, value,
        low_warn=False, high_warn=False, low_alarm=False, high_alarm=False,
        oor_low=False, oor_high=False, unreliable=False, manual=False):
    """
    Push an analog value with millisecond resolution to the specified eDNA
    service and tag, with many optional status definitions.

    :param site_service: The site.service where data will be pushed
    :param tag: The eDNA tag to push data. Tag only (e.g. ADE1CA01)
    :param time_value: The time of the point, which MUST be in UTC Epoch
        format. For example, "1483926416" not "2016/01/01 01:01:01".
    :param msec: The additional milliseconds for the time_value
    :param value: The value associated with the above time.
    :param low_warn: TRUE if the point is in a low warning state
    :param high_warn: TRUE if the point is in a high warning state
    :param low_alarm: TRUE if the point is in a low alarm state
    :param high_alarm: TRUE if the point is in a high alarm state
    :param oor_low: TRUE if the point is out-of-range low
    :param oor_high: TRUE if the point is out-of-range high
    :param unreliable: TRUE if the point is unreliable
    :param manual: TRUE if the point is manually set
    :return: 0, if the data push is successful
    """
    # Marshal the scalar arguments into their ctypes equivalents
    szService = c_char_p(site_service.encode('utf-8'))
    szPointId = c_char_p(tag.encode('utf-8'))
    tTime = c_long(int(time_value))
    dValue = c_double(value)
    # The dll takes every status flag as an integer boolean, in this order
    flags = [c_int(int(flag)) for flag in
             (low_warn, high_warn, low_alarm, high_alarm,
              oor_low, oor_high, unreliable, manual)]
    usMsec = c_ushort(msec)
    # A return code of 0 indicates a successful push
    return dnaserv_dll.DnaAddAnalogShortIdMsecRecord(
        szService, szPointId, tTime, dValue, *flags, usMsec)
def AddAnalogShortIdMsecRecordNoStatus(site_service, tag, time_value, msec,
                                       value):
    """
    Push an analog value with millisecond resolution to the specified eDNA
    service and tag, without an associated point status.

    :param site_service: The site.service where data will be pushed
    :param tag: The eDNA tag to push data. Tag only (e.g. ADE1CA01)
    :param time_value: The time of the point, which MUST be in UTC Epoch
        format. For example, "1483926416" not "2016/01/01 01:01:01".
    :param msec: The additional milliseconds for the time_value
    :param value: The value associated with the above time.
    :return: 0, if the data push is successful
    """
    # Marshal the arguments inline and hand them to the dll; a return
    # code of 0 indicates a successful push
    return dnaserv_dll.DnaAddAnalogShortIdMsecRecordNoStatus(
        c_char_p(site_service.encode('utf-8')),
        c_char_p(tag.encode('utf-8')),
        c_long(int(time_value)),
        c_double(value),
        c_ushort(msec))
def AddDigitalShortIdMsecRecord(site_service, tag, time_value, msec,
        value, status_string="OK ", warn=False, chattering=False,
        unreliable=False, manual=False):
    """
    This function will add a digital value to the specified eDNA service and
    tag, with millisecond resolution, including all default point status
    definitions.

    :param site_service: The site.service where data will be pushed
    :param tag: The eDNA tag to push data. Tag only (e.g. ADE1CA01)
    :param time_value: The time of the point, which MUST be in UTC Epoch
        format. For example, "1483926416" not "2016/01/01 01:01:01".
    :param msec: The additional milliseconds for the time_value
    :param value: should be either TRUE or FALSE
    :param status_string: the status label; strings shorter than the 16
        characters the dll expects are right-padded with spaces
    :param warn: TRUE if the point is in a warning state
    :param chattering: TRUE if the point is in a chattering state
    :param unreliable: TRUE if the point is in an unreliable state
    :param manual: TRUE if the point was manually set
    :return: 0, if the data push is successful
    """
    # Define all required variables in the correct ctypes format
    szService = c_char_p(site_service.encode('utf-8'))
    szPointId = c_char_p(tag.encode('utf-8'))
    tTime = c_long(int(time_value))
    # FIX: the dll requires a status string of exactly 16 characters, so
    # pad short strings (including the default) instead of passing them
    # through unchecked. Strings of 16+ characters are left unchanged.
    szStatus = create_string_buffer(status_string.ljust(16).encode('utf-8'))
    bSet = c_int(int(value))
    bDigitalWarning = c_int(int(warn))
    bDigitalChattering = c_int(int(chattering))
    bUnreliable = c_int(int(unreliable))
    bManual = c_int(int(manual))
    usMsec = c_ushort(msec)
    # Try to push the data. Function will return 0 if successful.
    nRet = dnaserv_dll.DnaAddDigitalShortIdMsecRecord(szService, szPointId,
        tTime, bSet, szStatus, bDigitalWarning, bDigitalChattering,
        bUnreliable, bManual, usMsec)
    return nRet
def FlushShortIdRecords(site_service):
    """
    Flush all the queued records.

    :param site_service: The site.service where data was pushed
    :return: message whether function was successful (the dll return code
             concatenated with the dll's status message)
    """
    # Define all required variables in the correct ctypes format
    szService = c_char_p(site_service.encode('utf-8'))
    # BUG FIX: the buffer must be as large as the size reported to the dll
    # (nMessage = 20). The old code allocated only a 2-byte buffer from
    # b" ", inviting a buffer overrun when the dll wrote its message.
    szMessage = create_string_buffer(20)
    nMessage = c_ushort(20)
    # Try to flush the data. Function will return message regarding success.
    nRet = dnaserv_dll.DnaFlushShortIdRecords(szService, byref(szMessage),
                                              nMessage)
    return str(nRet) + szMessage.value.decode('utf-8')
# Import-time sanity check: at least one eDNA service should be reachable,
# otherwise there is a problem with the eDNA connection.
service_array = dna.GetServices()
num_services = 0
if not service_array.empty:
    num_services = str(len(service_array))
    print("Successfully connected to " + num_services + " eDNA services.")
# Drop the module-level temporaries so they do not pollute the namespace
del service_array, num_services, default_location
|
drericstrong/pyedna | pyedna/serv.py | AddAnalogShortIdMsecRecord | python | def AddAnalogShortIdMsecRecord(site_service, tag, time_value, msec, value,
low_warn=False, high_warn=False, low_alarm=False, high_alarm=False,
oor_low=False, oor_high=False, unreliable=False, manual=False):
# Define all required variables in the correct ctypes format
szService = c_char_p(site_service.encode('utf-8'))
szPointId = c_char_p(tag.encode('utf-8'))
tTime = c_long(int(time_value))
dValue = c_double(value)
bLowWarning = c_int(int(low_warn))
bHighWarning = c_int(int(high_warn))
bLowAlarm = c_int(int(low_alarm))
bHighAlarm = c_int(int(high_alarm))
bOutOfRangeLow = c_int(int(oor_low))
bOutOfRangeHigh = c_int(int(oor_high))
bUnReliable = c_int(int(unreliable))
bManual = c_int(int(manual))
usMsec = c_ushort(msec)
# Try to push the data. Function will return 0 if successful.
nRet = dnaserv_dll.DnaAddAnalogShortIdMsecRecord(szService, szPointId,
tTime, dValue, bLowWarning, bHighWarning, bLowAlarm,
bHighAlarm, bOutOfRangeLow, bOutOfRangeHigh, bUnReliable,
bManual, usMsec)
return nRet | This function will add an analog value to the specified eDNA service and
tag, with many optional status definitions.
:param site_service: The site.service where data will be pushed
:param tag: The eDNA tag to push data. Tag only (e.g. ADE1CA01)
:param time_value: The time of the point, which MUST be in UTC Epoch
format. For example, "1483926416" not "2016/01/01 01:01:01".
:param msec: The additional milliseconds for the time_value
:param value: The value associated with the above time.
:param low_warn: TRUE if the point is in a low warning state
:param high_warn: TRUE if the point is in a high warning state
:param low_alarm: TRUE if the point is in a low alarm state
:param high_alarm: TRUE if the point is in a high alarm state
:param oor_low: TRUE if the point is out-of-range low
:param oor_high: TRUE if the point is out-of-range high
:param unreliable: TRUE if the point is unreliable
:param manual: TRUE if the point is manually set
:return: 0, if the data push is successful | train | https://github.com/drericstrong/pyedna/blob/b8f8f52def4f26bb4f3a993ce3400769518385f6/pyedna/serv.py#L171-L214 | null | # -*- coding: utf-8 -*-
"""
pyedna.serv
-------------
This module contains functions within the EzDnaServApi, mainly used for
direct interaction with eDNA services, such as pushing data in real-time.
:copyright: (c) 2017 Eric Strong.
:license: Refer to LICENSE.txt for more information.
"""
import os
import warnings
import pyedna.ezdna as dna
from unittest.mock import Mock
from ctypes import cdll, byref, create_string_buffer
from ctypes import c_char_p, c_double, c_short, c_ushort, c_long, c_int
def _mock_edna():
    """Return a Mock standing in for the eDNA dll.

    Allows the module to be imported (e.g. for automatic documentation
    builds) on machines without the dll; every mocked call returns 1.
    """
    mocked = Mock()
    mocked.configure_mock(**{
        'DnaAddAnalogShortIdRecord.return_value': 1,
        'DnaAddAnalogShortIdRecordNoStatus.return_value': 1,
        'DnaAddDigitalShortIdRecord.return_value': 1,
        'DnaAddAnalogShortIdMsecRecord.return_value': 1,
        'DnaAddAnalogShortIdMsecRecordNoStatus.return_value': 1,
        'DnaAddDigitalShortIdMsecRecord.return_value': 1,
        'DnaFlushShortIdRecords.return_value': 1})
    return mocked
# This code should execute at the beginning of the module import, because
# all of the functions in this module require the dna_dll library to be
# loaded. See "LoadDll" if not in default location
# NOTE(review): the default path below points at EzDnaApi64.dll, while the
# LoadDll docstring refers to EZDnaServApi64.dll -- confirm which dll the
# serv functions actually require.
default_location = "C:\\Program Files (x86)\\eDNA\\EzDnaApi64.dll"
if os.path.isfile(default_location):
    # Load the real eDNA API dll when it is present
    dnaserv_dll = cdll.LoadLibrary(default_location)
else:
    # Fall back to a Mock so the module can still be imported; all dll
    # calls return dummy values until LoadDll is invoked manually
    warnings.warn("ERROR- no eDNA dll detected at " +
                  "C:\\Program Files (x86)\\eDNA\\EzDnaApi64.dll" +
                  " . Please manually load dll using the LoadDll function. " +
                  "Mocking dll, but all functions will fail until " +
                  "dll is manually loaded...")
    dnaserv_dll = _mock_edna()
def LoadDll(location):
    """
    If the EZDnaServApi64.dll file is not in the default location
    (C:/Program Files (x86)/eDNA/EZDnaServApi64.dll) then the user must
    specify the correct location of the file, before this module can be used.

    :param location: full location of EZDnaServApi64.dll, including filename
    :raises Exception: if no file exists at the given location
    """
    # Guard clause: refuse to load anything that is not an existing file
    if not os.path.isfile(location):
        raise Exception("ERROR- file does not exist at " + location)
    global dnaserv_dll
    dnaserv_dll = cdll.LoadLibrary(location)
def AddAnalogShortIdRecord(site_service, tag, time_value, value,
        low_warn=False, high_warn=False, low_alarm=False, high_alarm=False,
        oor_low=False, oor_high=False, unreliable=False, manual=False):
    """
    Push one analog value to the given eDNA service and tag, with many
    optional status definitions.

    :param site_service: The site.service where data will be pushed
    :param tag: The eDNA tag to push data. Tag only (e.g. ADE1CA01)
    :param time_value: The time of the point, which MUST be in UTC Epoch
        format. For example, "1483926416" not "2016/01/01 01:01:01".
    :param value: The value associated with the above time.
    :param low_warn: TRUE if the point is in a low warning state
    :param high_warn: TRUE if the point is in a high warning state
    :param low_alarm: TRUE if the point is in a low alarm state
    :param high_alarm: TRUE if the point is in a high alarm state
    :param oor_low: TRUE if the point is out-of-range low
    :param oor_high: TRUE if the point is out-of-range high
    :param unreliable: TRUE if the point is unreliable
    :param manual: TRUE if the point is manually set
    :return: 0, if the data push is successful
    """
    # The DLL expects the eight status booleans in exactly this order.
    status_flags = [c_int(int(flag)) for flag in
                    (low_warn, high_warn, low_alarm, high_alarm,
                     oor_low, oor_high, unreliable, manual)]
    return dnaserv_dll.DnaAddAnalogShortIdRecord(
        c_char_p(site_service.encode('utf-8')),
        c_char_p(tag.encode('utf-8')),
        c_long(int(time_value)),
        c_double(value),
        *status_flags)
def AddAnalogShortIdRecordNoStatus(site_service, tag, time_value, value):
    """
    Push one analog value to the given eDNA service and tag, with no
    associated point status.

    :param site_service: The site.service where data will be pushed
    :param tag: The eDNA tag to push data. Tag only (e.g. ADE1CA01)
    :param time_value: The time of the point, which MUST be in UTC Epoch
        format. For example, "1483926416" not "2016/01/01 01:01:01".
    :param value: The value associated with the above time.
    :return: 0, if the data push is successful
    """
    # Marshal every argument into the ctypes form the DLL expects.
    return dnaserv_dll.DnaAddAnalogShortIdRecordNoStatus(
        c_char_p(site_service.encode('utf-8')),
        c_char_p(tag.encode('utf-8')),
        c_long(int(time_value)),
        c_double(value))
def AddDigitalShortIdRecord(site_service, tag, time_value, value,
        status_string="OK              ", warn=False, chattering=False,
        unreliable=False, manual=False):
    """
    Push one digital (boolean) value to the given eDNA service and tag,
    including the default point status definitions.

    :param site_service: The site.service where data will be pushed
    :param tag: The eDNA tag to push data. Tag only (e.g. ADE1CA01)
    :param time_value: The time of the point, which MUST be in UTC Epoch
        format. For example, "1483926416" not "2016/01/01 01:01:01".
    :param value: should be either TRUE or FALSE
    :param status_string: a string that must be EXACTLY 16 characters
        (the default is assumed to be "OK" space-padded to 16 chars)
    :param warn: TRUE if the point is in a warning state
    :param chattering: TRUE if the point is in a chattering state
    :param unreliable: TRUE if the point is in an unreliable state
    :param manual: TRUE if the point was manually set
    :return: 0, if the data push is successful
    """
    # TODO- check if the string is exactly 16 characters and convert
    status_buffer = create_string_buffer(status_string.encode('utf-8'))
    return dnaserv_dll.DnaAddDigitalShortIdRecord(
        c_char_p(site_service.encode('utf-8')),
        c_char_p(tag.encode('utf-8')),
        c_long(int(time_value)),
        c_int(int(value)),
        status_buffer,
        c_int(int(warn)),
        c_int(int(chattering)),
        c_int(int(unreliable)),
        c_int(int(manual)))
def AddAnalogShortIdMsecRecordNoStatus(site_service, tag, time_value, msec,
        value):
    """
    Push one analog value (with millisecond resolution) to the given
    eDNA service and tag, with no associated point status.

    :param site_service: The site.service where data will be pushed
    :param tag: The eDNA tag to push data. Tag only (e.g. ADE1CA01)
    :param time_value: The time of the point, which MUST be in UTC Epoch
        format. For example, "1483926416" not "2016/01/01 01:01:01".
    :param msec: The additional milliseconds for the time_value
    :param value: The value associated with the above time.
    :return: 0, if the data push is successful
    """
    # Marshal every argument into the ctypes form the DLL expects.
    return dnaserv_dll.DnaAddAnalogShortIdMsecRecordNoStatus(
        c_char_p(site_service.encode('utf-8')),
        c_char_p(tag.encode('utf-8')),
        c_long(int(time_value)),
        c_double(value),
        c_ushort(msec))
def AddDigitalShortIdMsecRecord(site_service, tag, time_value, msec,
        value, status_string="OK              ", warn=False, chattering=False,
        unreliable=False, manual=False):
    """
    Push one digital (boolean) value with millisecond resolution to the
    given eDNA service and tag, including the default point status
    definitions.

    :param site_service: The site.service where data will be pushed
    :param tag: The eDNA tag to push data. Tag only (e.g. ADE1CA01)
    :param time_value: The time of the point, which MUST be in UTC Epoch
        format. For example, "1483926416" not "2016/01/01 01:01:01".
    :param msec: The additional milliseconds for the time_value
    :param value: should be either TRUE or FALSE
    :param status_string: a string that must be EXACTLY 16 characters
        (the default is assumed to be "OK" space-padded to 16 chars)
    :param warn: TRUE if the point is in a warning state
    :param chattering: TRUE if the point is in a chattering state
    :param unreliable: TRUE if the point is in an unreliable state
    :param manual: TRUE if the point was manually set
    :return: 0, if the data push is successful
    """
    status_buffer = create_string_buffer(status_string.encode('utf-8'))
    return dnaserv_dll.DnaAddDigitalShortIdMsecRecord(
        c_char_p(site_service.encode('utf-8')),
        c_char_p(tag.encode('utf-8')),
        c_long(int(time_value)),
        c_int(int(value)),
        status_buffer,
        c_int(int(warn)),
        c_int(int(chattering)),
        c_int(int(unreliable)),
        c_int(int(manual)),
        c_ushort(msec))
def FlushShortIdRecords(site_service):
    """
    Flush all the queued records for a service.

    :param site_service: The site.service where data was pushed
    :return: the DLL return code concatenated with its status message
    """
    # Define all required variables in the correct ctypes format
    szService = c_char_p(site_service.encode('utf-8'))
    # Allocate the message buffer from the declared capacity instead of an
    # invisible run of spaces, so the buffer size and nMessage cannot
    # silently drift apart (a short buffer risks a DLL write overrun).
    buffer_len = 20
    szMessage = create_string_buffer(buffer_len)
    nMessage = c_ushort(buffer_len)
    # Try to flush the data. Function will return message regarding success.
    nRet = dnaserv_dll.DnaFlushShortIdRecords(szService, byref(szMessage),
                                              nMessage)
    return str(nRet) + szMessage.value.decode('utf-8')
# At the end of the module, we need to check that at least one eDNA service
# is connected. Otherwise, there is a problem with the eDNA connection.
service_array = dna.GetServices()
num_services = 0
if not service_array.empty:
    num_services = str(len(service_array))
    # NOTE(review): assumed the success message prints only when services
    # were found (printing with num_services == 0 would raise TypeError).
    print("Successfully connected to " + num_services + " eDNA services.")
# Cleanup the unnecessary variables
del service_array, num_services, default_location
|
drericstrong/pyedna | pyedna/serv.py | AddAnalogShortIdMsecRecordNoStatus | python | def AddAnalogShortIdMsecRecordNoStatus(site_service, tag, time_value, msec,
value):
# Define all required variables in the correct ctypes format
szService = c_char_p(site_service.encode('utf-8'))
szPointId = c_char_p(tag.encode('utf-8'))
tTime = c_long(int(time_value))
dValue = c_double(value)
usMsec = c_ushort(msec)
# Try to push the data. Function will return 0 if successful.
nRet = dnaserv_dll.DnaAddAnalogShortIdMsecRecordNoStatus(szService,
szPointId, tTime, dValue, usMsec)
return nRet | This function will add an analog value to the specified eDNA service and
tag, without an associated point status.
:param site_service: The site.service where data will be pushed
:param tag: The eDNA tag to push data. Tag only (e.g. ADE1CA01)
:param time_value: The time of the point, which MUST be in UTC Epoch
format. For example, "1483926416" not "2016/01/01 01:01:01".
:param msec: The additional milliseconds for the time_value
:param value: The value associated with the above time.
:return: 0, if the data push is successful | train | https://github.com/drericstrong/pyedna/blob/b8f8f52def4f26bb4f3a993ce3400769518385f6/pyedna/serv.py#L217-L241 | null | # -*- coding: utf-8 -*-
"""
pyedna.serv
-------------
This module contains functions within the EzDnaServApi, mainly used for
direct interaction with eDNA services, such as pushing data in real-time.
:copyright: (c) 2017 Eric Strong.
:license: Refer to LICENSE.txt for more information.
"""
import os
import warnings
import pyedna.ezdna as dna
from unittest.mock import Mock
from ctypes import cdll, byref, create_string_buffer
from ctypes import c_char_p, c_double, c_short, c_ushort, c_long, c_int
def _mock_edna():
# This function will mock all the methods that were used in the dna_dll.
# It's necessary so that documentation can be automatically created.
dnaserv_dll = Mock()
attrs = {'DnaAddAnalogShortIdRecord.return_value': 1,
'DnaAddAnalogShortIdRecordNoStatus.return_value': 1,
'DnaAddDigitalShortIdRecord.return_value': 1,
'DnaAddAnalogShortIdMsecRecord.return_value': 1,
'DnaAddAnalogShortIdMsecRecordNoStatus.return_value': 1,
'DnaAddDigitalShortIdMsecRecord.return_value': 1,
'DnaFlushShortIdRecords.return_value': 1}
dnaserv_dll.configure_mock(**attrs)
return dnaserv_dll
# This code should execute at the beginning of the module import, because
# all of the functions in this module require the dna_dll library to be
# loaded. See "LoadDll" if not in default location
default_location = "C:\\Program Files (x86)\\eDNA\\EzDnaApi64.dll"
if not os.path.isfile(default_location):
    # No local eDNA install detected- fall back to the mock so the module
    # can still be imported; real calls will fail until LoadDll is used.
    warnings.warn("ERROR- no eDNA dll detected at " +
                  "C:\\Program Files (x86)\\eDNA\\EzDnaApi64.dll" +
                  " . Please manually load dll using the LoadDll function. " +
                  "Mocking dll, but all functions will fail until " +
                  "dll is manually loaded...")
    dnaserv_dll = _mock_edna()
else:
    dnaserv_dll = cdll.LoadLibrary(default_location)
def LoadDll(location):
    r"""
    Load EzDnaServApi64.dll from a user-supplied location.

    If the EZDnaServApi64.dll file is not in the default location
    (C:\Program Files (x86)\eDNA\EZDnaServApi64.dll) then the user must
    specify the correct location of the file, before this module can be
    used.

    :param location: full location of EZDnaServApi64.dll, including filename
    :raises FileNotFoundError: if no file exists at *location*
    """
    if not os.path.isfile(location):
        # A specific exception type lets callers distinguish a bad path
        # from other failures; it is still caught by "except Exception".
        raise FileNotFoundError("ERROR- file does not exist at " + location)
    global dnaserv_dll
    dnaserv_dll = cdll.LoadLibrary(location)
def AddAnalogShortIdRecord(site_service, tag, time_value, value,
        low_warn=False, high_warn=False, low_alarm=False, high_alarm=False,
        oor_low=False, oor_high=False, unreliable=False, manual=False):
    """
    Push one analog value to the given eDNA service and tag, with many
    optional status definitions.

    :param site_service: The site.service where data will be pushed
    :param tag: The eDNA tag to push data. Tag only (e.g. ADE1CA01)
    :param time_value: The time of the point, which MUST be in UTC Epoch
        format. For example, "1483926416" not "2016/01/01 01:01:01".
    :param value: The value associated with the above time.
    :param low_warn: TRUE if the point is in a low warning state
    :param high_warn: TRUE if the point is in a high warning state
    :param low_alarm: TRUE if the point is in a low alarm state
    :param high_alarm: TRUE if the point is in a high alarm state
    :param oor_low: TRUE if the point is out-of-range low
    :param oor_high: TRUE if the point is out-of-range high
    :param unreliable: TRUE if the point is unreliable
    :param manual: TRUE if the point is manually set
    :return: 0, if the data push is successful
    """
    # The DLL expects the eight status booleans in exactly this order.
    status_flags = [c_int(int(flag)) for flag in
                    (low_warn, high_warn, low_alarm, high_alarm,
                     oor_low, oor_high, unreliable, manual)]
    return dnaserv_dll.DnaAddAnalogShortIdRecord(
        c_char_p(site_service.encode('utf-8')),
        c_char_p(tag.encode('utf-8')),
        c_long(int(time_value)),
        c_double(value),
        *status_flags)
def AddAnalogShortIdRecordNoStatus(site_service, tag, time_value, value):
    """
    Push one analog value to the given eDNA service and tag, with no
    associated point status.

    :param site_service: The site.service where data will be pushed
    :param tag: The eDNA tag to push data. Tag only (e.g. ADE1CA01)
    :param time_value: The time of the point, which MUST be in UTC Epoch
        format. For example, "1483926416" not "2016/01/01 01:01:01".
    :param value: The value associated with the above time.
    :return: 0, if the data push is successful
    """
    # Marshal every argument into the ctypes form the DLL expects.
    return dnaserv_dll.DnaAddAnalogShortIdRecordNoStatus(
        c_char_p(site_service.encode('utf-8')),
        c_char_p(tag.encode('utf-8')),
        c_long(int(time_value)),
        c_double(value))
def AddDigitalShortIdRecord(site_service, tag, time_value, value,
        status_string="OK              ", warn=False, chattering=False,
        unreliable=False, manual=False):
    """
    Push one digital (boolean) value to the given eDNA service and tag,
    including the default point status definitions.

    :param site_service: The site.service where data will be pushed
    :param tag: The eDNA tag to push data. Tag only (e.g. ADE1CA01)
    :param time_value: The time of the point, which MUST be in UTC Epoch
        format. For example, "1483926416" not "2016/01/01 01:01:01".
    :param value: should be either TRUE or FALSE
    :param status_string: a string that must be EXACTLY 16 characters
        (the default is assumed to be "OK" space-padded to 16 chars)
    :param warn: TRUE if the point is in a warning state
    :param chattering: TRUE if the point is in a chattering state
    :param unreliable: TRUE if the point is in an unreliable state
    :param manual: TRUE if the point was manually set
    :return: 0, if the data push is successful
    """
    # TODO- check if the string is exactly 16 characters and convert
    status_buffer = create_string_buffer(status_string.encode('utf-8'))
    return dnaserv_dll.DnaAddDigitalShortIdRecord(
        c_char_p(site_service.encode('utf-8')),
        c_char_p(tag.encode('utf-8')),
        c_long(int(time_value)),
        c_int(int(value)),
        status_buffer,
        c_int(int(warn)),
        c_int(int(chattering)),
        c_int(int(unreliable)),
        c_int(int(manual)))
def AddAnalogShortIdMsecRecord(site_service, tag, time_value, msec, value,
        low_warn=False, high_warn=False, low_alarm=False, high_alarm=False,
        oor_low=False, oor_high=False, unreliable=False, manual=False):
    """
    Push one analog value (with millisecond resolution) to the given
    eDNA service and tag, with many optional status definitions.

    :param site_service: The site.service where data will be pushed
    :param tag: The eDNA tag to push data. Tag only (e.g. ADE1CA01)
    :param time_value: The time of the point, which MUST be in UTC Epoch
        format. For example, "1483926416" not "2016/01/01 01:01:01".
    :param msec: The additional milliseconds for the time_value
    :param value: The value associated with the above time.
    :param low_warn: TRUE if the point is in a low warning state
    :param high_warn: TRUE if the point is in a high warning state
    :param low_alarm: TRUE if the point is in a low alarm state
    :param high_alarm: TRUE if the point is in a high alarm state
    :param oor_low: TRUE if the point is out-of-range low
    :param oor_high: TRUE if the point is out-of-range high
    :param unreliable: TRUE if the point is unreliable
    :param manual: TRUE if the point is manually set
    :return: 0, if the data push is successful
    """
    # The DLL expects the eight status booleans in exactly this order,
    # followed by the millisecond component.
    status_flags = [c_int(int(flag)) for flag in
                    (low_warn, high_warn, low_alarm, high_alarm,
                     oor_low, oor_high, unreliable, manual)]
    return dnaserv_dll.DnaAddAnalogShortIdMsecRecord(
        c_char_p(site_service.encode('utf-8')),
        c_char_p(tag.encode('utf-8')),
        c_long(int(time_value)),
        c_double(value),
        *status_flags,
        c_ushort(msec))
def AddDigitalShortIdMsecRecord(site_service, tag, time_value, msec,
        value, status_string="OK              ", warn=False, chattering=False,
        unreliable=False, manual=False):
    """
    Push one digital (boolean) value with millisecond resolution to the
    given eDNA service and tag, including the default point status
    definitions.

    :param site_service: The site.service where data will be pushed
    :param tag: The eDNA tag to push data. Tag only (e.g. ADE1CA01)
    :param time_value: The time of the point, which MUST be in UTC Epoch
        format. For example, "1483926416" not "2016/01/01 01:01:01".
    :param msec: The additional milliseconds for the time_value
    :param value: should be either TRUE or FALSE
    :param status_string: a string that must be EXACTLY 16 characters
        (the default is assumed to be "OK" space-padded to 16 chars)
    :param warn: TRUE if the point is in a warning state
    :param chattering: TRUE if the point is in a chattering state
    :param unreliable: TRUE if the point is in an unreliable state
    :param manual: TRUE if the point was manually set
    :return: 0, if the data push is successful
    """
    status_buffer = create_string_buffer(status_string.encode('utf-8'))
    return dnaserv_dll.DnaAddDigitalShortIdMsecRecord(
        c_char_p(site_service.encode('utf-8')),
        c_char_p(tag.encode('utf-8')),
        c_long(int(time_value)),
        c_int(int(value)),
        status_buffer,
        c_int(int(warn)),
        c_int(int(chattering)),
        c_int(int(unreliable)),
        c_int(int(manual)),
        c_ushort(msec))
def FlushShortIdRecords(site_service):
    """
    Flush all the queued records for a service.

    :param site_service: The site.service where data was pushed
    :return: the DLL return code concatenated with its status message
    """
    # Define all required variables in the correct ctypes format
    szService = c_char_p(site_service.encode('utf-8'))
    # Allocate the message buffer from the declared capacity instead of an
    # invisible run of spaces, so the buffer size and nMessage cannot
    # silently drift apart (a short buffer risks a DLL write overrun).
    buffer_len = 20
    szMessage = create_string_buffer(buffer_len)
    nMessage = c_ushort(buffer_len)
    # Try to flush the data. Function will return message regarding success.
    nRet = dnaserv_dll.DnaFlushShortIdRecords(szService, byref(szMessage),
                                              nMessage)
    return str(nRet) + szMessage.value.decode('utf-8')
# At the end of the module, we need to check that at least one eDNA service
# is connected. Otherwise, there is a problem with the eDNA connection.
service_array = dna.GetServices()
num_services = 0
if not service_array.empty:
    num_services = str(len(service_array))
    # NOTE(review): assumed the success message prints only when services
    # were found (printing with num_services == 0 would raise TypeError).
    print("Successfully connected to " + num_services + " eDNA services.")
# Cleanup the unnecessary variables
del service_array, num_services, default_location
|
drericstrong/pyedna | pyedna/serv.py | AddDigitalShortIdMsecRecord | python | def AddDigitalShortIdMsecRecord(site_service, tag, time_value, msec,
value, status_string="OK ", warn=False, chattering=False,
unreliable=False, manual=False):
# Define all required variables in the correct ctypes format
szService = c_char_p(site_service.encode('utf-8'))
szPointId = c_char_p(tag.encode('utf-8'))
tTime = c_long(int(time_value))
szStatus = create_string_buffer(status_string.encode('utf-8'))
bSet = c_int(int(value))
bDigitalWarning = c_int(int(warn))
bDigitalChattering = c_int(int(chattering))
bUnreliable = c_int(int(unreliable))
bManual = c_int(int(manual))
usMsec = c_ushort(msec)
# Try to push the data. Function will return 0 if successful.
nRet = dnaserv_dll.DnaAddDigitalShortIdMsecRecord(szService, szPointId,
tTime, bSet, szStatus, bDigitalWarning, bDigitalChattering,
bUnreliable, bManual, usMsec)
return nRet | This function will add a digital value to the specified eDNA service and
tag, including all default point status definitions.
:param site_service: The site.service where data will be pushed
:param tag: The eDNA tag to push data. Tag only (e.g. ADE1CA01)
:param time_value: The time of the point, which MUST be in UTC Epoch
format. For example, "1483926416" not "2016/01/01 01:01:01".
:param msec: The additional milliseconds for the time_value
:param value: should be either TRUE or FALSE
:param status_string: a string that must be EXACTLY 16 characters
:param warn: TRUE if the point is in a warning state
:param chattering: TRUE if the point is in a chattering state
:param unreliable: TRUE if the point is in an unreliable state
:param manual: TRUE if the point was manually set
:return: 0, if the data push is successful | train | https://github.com/drericstrong/pyedna/blob/b8f8f52def4f26bb4f3a993ce3400769518385f6/pyedna/serv.py#L244-L280 | null | # -*- coding: utf-8 -*-
"""
pyedna.serv
-------------
This module contains functions within the EzDnaServApi, mainly used for
direct interaction with eDNA services, such as pushing data in real-time.
:copyright: (c) 2017 Eric Strong.
:license: Refer to LICENSE.txt for more information.
"""
import os
import warnings
import pyedna.ezdna as dna
from unittest.mock import Mock
from ctypes import cdll, byref, create_string_buffer
from ctypes import c_char_p, c_double, c_short, c_ushort, c_long, c_int
def _mock_edna():
# This function will mock all the methods that were used in the dna_dll.
# It's necessary so that documentation can be automatically created.
dnaserv_dll = Mock()
attrs = {'DnaAddAnalogShortIdRecord.return_value': 1,
'DnaAddAnalogShortIdRecordNoStatus.return_value': 1,
'DnaAddDigitalShortIdRecord.return_value': 1,
'DnaAddAnalogShortIdMsecRecord.return_value': 1,
'DnaAddAnalogShortIdMsecRecordNoStatus.return_value': 1,
'DnaAddDigitalShortIdMsecRecord.return_value': 1,
'DnaFlushShortIdRecords.return_value': 1}
dnaserv_dll.configure_mock(**attrs)
return dnaserv_dll
# This code should execute at the beginning of the module import, because
# all of the functions in this module require the dna_dll library to be
# loaded. See "LoadDll" if not in default location
default_location = "C:\\Program Files (x86)\\eDNA\\EzDnaApi64.dll"
if not os.path.isfile(default_location):
    # No local eDNA install detected- fall back to the mock so the module
    # can still be imported; real calls will fail until LoadDll is used.
    warnings.warn("ERROR- no eDNA dll detected at " +
                  "C:\\Program Files (x86)\\eDNA\\EzDnaApi64.dll" +
                  " . Please manually load dll using the LoadDll function. " +
                  "Mocking dll, but all functions will fail until " +
                  "dll is manually loaded...")
    dnaserv_dll = _mock_edna()
else:
    dnaserv_dll = cdll.LoadLibrary(default_location)
def LoadDll(location):
    r"""
    Load EzDnaServApi64.dll from a user-supplied location.

    If the EZDnaServApi64.dll file is not in the default location
    (C:\Program Files (x86)\eDNA\EZDnaServApi64.dll) then the user must
    specify the correct location of the file, before this module can be
    used.

    :param location: full location of EZDnaServApi64.dll, including filename
    :raises FileNotFoundError: if no file exists at *location*
    """
    if not os.path.isfile(location):
        # A specific exception type lets callers distinguish a bad path
        # from other failures; it is still caught by "except Exception".
        raise FileNotFoundError("ERROR- file does not exist at " + location)
    global dnaserv_dll
    dnaserv_dll = cdll.LoadLibrary(location)
def AddAnalogShortIdRecord(site_service, tag, time_value, value,
        low_warn=False, high_warn=False, low_alarm=False, high_alarm=False,
        oor_low=False, oor_high=False, unreliable=False, manual=False):
    """
    Push one analog value to the given eDNA service and tag, with many
    optional status definitions.

    :param site_service: The site.service where data will be pushed
    :param tag: The eDNA tag to push data. Tag only (e.g. ADE1CA01)
    :param time_value: The time of the point, which MUST be in UTC Epoch
        format. For example, "1483926416" not "2016/01/01 01:01:01".
    :param value: The value associated with the above time.
    :param low_warn: TRUE if the point is in a low warning state
    :param high_warn: TRUE if the point is in a high warning state
    :param low_alarm: TRUE if the point is in a low alarm state
    :param high_alarm: TRUE if the point is in a high alarm state
    :param oor_low: TRUE if the point is out-of-range low
    :param oor_high: TRUE if the point is out-of-range high
    :param unreliable: TRUE if the point is unreliable
    :param manual: TRUE if the point is manually set
    :return: 0, if the data push is successful
    """
    # The DLL expects the eight status booleans in exactly this order.
    status_flags = [c_int(int(flag)) for flag in
                    (low_warn, high_warn, low_alarm, high_alarm,
                     oor_low, oor_high, unreliable, manual)]
    return dnaserv_dll.DnaAddAnalogShortIdRecord(
        c_char_p(site_service.encode('utf-8')),
        c_char_p(tag.encode('utf-8')),
        c_long(int(time_value)),
        c_double(value),
        *status_flags)
def AddAnalogShortIdRecordNoStatus(site_service, tag, time_value, value):
    """
    Push one analog value to the given eDNA service and tag, with no
    associated point status.

    :param site_service: The site.service where data will be pushed
    :param tag: The eDNA tag to push data. Tag only (e.g. ADE1CA01)
    :param time_value: The time of the point, which MUST be in UTC Epoch
        format. For example, "1483926416" not "2016/01/01 01:01:01".
    :param value: The value associated with the above time.
    :return: 0, if the data push is successful
    """
    # Marshal every argument into the ctypes form the DLL expects.
    return dnaserv_dll.DnaAddAnalogShortIdRecordNoStatus(
        c_char_p(site_service.encode('utf-8')),
        c_char_p(tag.encode('utf-8')),
        c_long(int(time_value)),
        c_double(value))
def AddDigitalShortIdRecord(site_service, tag, time_value, value,
        status_string="OK              ", warn=False, chattering=False,
        unreliable=False, manual=False):
    """
    Push one digital (boolean) value to the given eDNA service and tag,
    including the default point status definitions.

    :param site_service: The site.service where data will be pushed
    :param tag: The eDNA tag to push data. Tag only (e.g. ADE1CA01)
    :param time_value: The time of the point, which MUST be in UTC Epoch
        format. For example, "1483926416" not "2016/01/01 01:01:01".
    :param value: should be either TRUE or FALSE
    :param status_string: a string that must be EXACTLY 16 characters
        (the default is assumed to be "OK" space-padded to 16 chars)
    :param warn: TRUE if the point is in a warning state
    :param chattering: TRUE if the point is in a chattering state
    :param unreliable: TRUE if the point is in an unreliable state
    :param manual: TRUE if the point was manually set
    :return: 0, if the data push is successful
    """
    # TODO- check if the string is exactly 16 characters and convert
    status_buffer = create_string_buffer(status_string.encode('utf-8'))
    return dnaserv_dll.DnaAddDigitalShortIdRecord(
        c_char_p(site_service.encode('utf-8')),
        c_char_p(tag.encode('utf-8')),
        c_long(int(time_value)),
        c_int(int(value)),
        status_buffer,
        c_int(int(warn)),
        c_int(int(chattering)),
        c_int(int(unreliable)),
        c_int(int(manual)))
def AddAnalogShortIdMsecRecord(site_service, tag, time_value, msec, value,
        low_warn=False, high_warn=False, low_alarm=False, high_alarm=False,
        oor_low=False, oor_high=False, unreliable=False, manual=False):
    """
    Push one analog value (with millisecond resolution) to the given
    eDNA service and tag, with many optional status definitions.

    :param site_service: The site.service where data will be pushed
    :param tag: The eDNA tag to push data. Tag only (e.g. ADE1CA01)
    :param time_value: The time of the point, which MUST be in UTC Epoch
        format. For example, "1483926416" not "2016/01/01 01:01:01".
    :param msec: The additional milliseconds for the time_value
    :param value: The value associated with the above time.
    :param low_warn: TRUE if the point is in a low warning state
    :param high_warn: TRUE if the point is in a high warning state
    :param low_alarm: TRUE if the point is in a low alarm state
    :param high_alarm: TRUE if the point is in a high alarm state
    :param oor_low: TRUE if the point is out-of-range low
    :param oor_high: TRUE if the point is out-of-range high
    :param unreliable: TRUE if the point is unreliable
    :param manual: TRUE if the point is manually set
    :return: 0, if the data push is successful
    """
    # The DLL expects the eight status booleans in exactly this order,
    # followed by the millisecond component.
    status_flags = [c_int(int(flag)) for flag in
                    (low_warn, high_warn, low_alarm, high_alarm,
                     oor_low, oor_high, unreliable, manual)]
    return dnaserv_dll.DnaAddAnalogShortIdMsecRecord(
        c_char_p(site_service.encode('utf-8')),
        c_char_p(tag.encode('utf-8')),
        c_long(int(time_value)),
        c_double(value),
        *status_flags,
        c_ushort(msec))
def AddAnalogShortIdMsecRecordNoStatus(site_service, tag, time_value, msec,
        value):
    """
    Push one analog value (with millisecond resolution) to the given
    eDNA service and tag, with no associated point status.

    :param site_service: The site.service where data will be pushed
    :param tag: The eDNA tag to push data. Tag only (e.g. ADE1CA01)
    :param time_value: The time of the point, which MUST be in UTC Epoch
        format. For example, "1483926416" not "2016/01/01 01:01:01".
    :param msec: The additional milliseconds for the time_value
    :param value: The value associated with the above time.
    :return: 0, if the data push is successful
    """
    # Marshal every argument into the ctypes form the DLL expects.
    return dnaserv_dll.DnaAddAnalogShortIdMsecRecordNoStatus(
        c_char_p(site_service.encode('utf-8')),
        c_char_p(tag.encode('utf-8')),
        c_long(int(time_value)),
        c_double(value),
        c_ushort(msec))
def FlushShortIdRecords(site_service):
    """
    Flush all the queued records for a service.

    :param site_service: The site.service where data was pushed
    :return: message whether function was successful
    """
    # The API writes its status text into a caller-supplied buffer of a
    # fixed (20-character) size.
    service_arg = c_char_p(site_service.encode('utf-8'))
    message_buf = create_string_buffer(b"                    ")
    message_len = c_ushort(20)
    ret = dnaserv_dll.DnaFlushShortIdRecords(service_arg, byref(message_buf),
                                             message_len)
    # Combine the numeric return code with the textual status message.
    return str(ret) + message_buf.value.decode('utf-8')
# At the end of the module, we need to check that at least one eDNA service
# is connected. Otherwise, there is a problem with the eDNA connection.
# GetServices returns a pandas DataFrame; an empty frame means no services.
service_array = dna.GetServices()
num_services = 0
if not service_array.empty:
    # One row per connected service; report the count to the user.
    num_services = str(len(service_array))
    print("Successfully connected to " + num_services + " eDNA services.")
# Cleanup the unnecessary variables so they do not leak into the public
# module namespace.
del(service_array, num_services, default_location)
|
drericstrong/pyedna | pyedna/serv.py | FlushShortIdRecords | python | def FlushShortIdRecords(site_service):
# Define all required variables in the correct ctypes format
szService = c_char_p(site_service.encode('utf-8'))
szMessage = create_string_buffer(b" ")
nMessage = c_ushort(20)
# Try to flush the data. Function will return message regarding success.
nRet = dnaserv_dll.DnaFlushShortIdRecords(szService, byref(szMessage),
nMessage)
return str(nRet) + szMessage.value.decode('utf-8') | Flush all the queued records.
:param site_service: The site.service where data was pushed
:return: message whether function was successful | train | https://github.com/drericstrong/pyedna/blob/b8f8f52def4f26bb4f3a993ce3400769518385f6/pyedna/serv.py#L283-L298 | null | # -*- coding: utf-8 -*-
"""
pyedna.serv
-------------
This module contains functions within the EzDnaServApi, mainly used for
direct interaction with eDNA services, such as pushing data in real-time.
:copyright: (c) 2017 Eric Strong.
:license: Refer to LICENSE.txt for more information.
"""
import os
import warnings
import pyedna.ezdna as dna
from unittest.mock import Mock
from ctypes import cdll, byref, create_string_buffer
from ctypes import c_char_p, c_double, c_short, c_ushort, c_long, c_int
def _mock_edna():
# This function will mock all the methods that were used in the dna_dll.
# It's necessary so that documentation can be automatically created.
dnaserv_dll = Mock()
attrs = {'DnaAddAnalogShortIdRecord.return_value': 1,
'DnaAddAnalogShortIdRecordNoStatus.return_value': 1,
'DnaAddDigitalShortIdRecord.return_value': 1,
'DnaAddAnalogShortIdMsecRecord.return_value': 1,
'DnaAddAnalogShortIdMsecRecordNoStatus.return_value': 1,
'DnaAddDigitalShortIdMsecRecord.return_value': 1,
'DnaFlushShortIdRecords.return_value': 1}
dnaserv_dll.configure_mock(**attrs)
return dnaserv_dll
# This code should execute at the beginning of the module import, because
# all of the functions in this module require the dna_dll library to be
# loaded. See "LoadDll" if not in default location
default_location = "C:\\Program Files (x86)\\eDNA\\EzDnaApi64.dll"
if os.path.isfile(default_location):
    # Load the real eDNA server API when it is installed in the default spot.
    dnaserv_dll = cdll.LoadLibrary(default_location)
else:
    # Fall back to a Mock so the module can still be imported; every call
    # returns a dummy value until LoadDll is called with the real path.
    warnings.warn("ERROR- no eDNA dll detected at " +
                  "C:\\Program Files (x86)\\eDNA\\EzDnaApi64.dll" +
                  " . Please manually load dll using the LoadDll function. " +
                  "Mocking dll, but all functions will fail until " +
                  "dll is manually loaded...")
    dnaserv_dll = _mock_edna()
def LoadDll(location):
    r"""
    Load EzDnaServApi64.dll from a non-default location. If the file is not
    in the default location (C:\Program Files (x86)\eDNA\EZDnaServApi64.dll)
    the user must call this before any other function in this module.

    :param location: full location of EZDnaServApi64.dll, including filename
    """
    # Fail fast with the original error message if the path is wrong.
    if not os.path.isfile(location):
        raise Exception("ERROR- file does not exist at " + location)
    global dnaserv_dll
    dnaserv_dll = cdll.LoadLibrary(location)
def AddAnalogShortIdRecord(site_service, tag, time_value, value,
        low_warn=False, high_warn=False, low_alarm=False, high_alarm=False,
        oor_low=False, oor_high=False, unreliable=False, manual=False):
    """
    Push a single analog value to an eDNA service/tag, with optional
    status flags.

    :param site_service: The site.service where data will be pushed
    :param tag: The eDNA tag to push data. Tag only (e.g. ADE1CA01)
    :param time_value: The time of the point, which MUST be in UTC Epoch
        format. For example, "1483926416" not "2016/01/01 01:01:01".
    :param value: The value associated with the above time.
    :param low_warn: TRUE if the point is in a low warning state
    :param high_warn: TRUE if the point is in a high warning state
    :param low_alarm: TRUE if the point is in a low alarm state
    :param high_alarm: TRUE if the point is in a high alarm state
    :param oor_low: TRUE if the point is out-of-range low
    :param oor_high: TRUE if the point is out-of-range high
    :param unreliable: TRUE if the point is unreliable
    :param manual: TRUE if the point is manually set
    :return: 0, if the data push is successful
    """
    # Convert every boolean status flag to a ctypes int, in the exact
    # argument order expected by the DLL entry point.
    flags = [c_int(int(flag)) for flag in
             (low_warn, high_warn, low_alarm, high_alarm,
              oor_low, oor_high, unreliable, manual)]
    # A return value of 0 indicates a successful push.
    return dnaserv_dll.DnaAddAnalogShortIdRecord(
        c_char_p(site_service.encode('utf-8')),
        c_char_p(tag.encode('utf-8')),
        c_long(int(time_value)),
        c_double(value),
        *flags)
def AddAnalogShortIdRecordNoStatus(site_service, tag, time_value, value):
    """
    Push a single analog value (without any status flags) to an eDNA
    service/tag.

    :param site_service: The site.service where data will be pushed
    :param tag: The eDNA tag to push data. Tag only (e.g. ADE1CA01)
    :param time_value: The time of the point, which MUST be in UTC Epoch
        format. For example, "1483926416" not "2016/01/01 01:01:01".
    :param value: The value associated with the above time.
    :return: 0, if the data push is successful
    """
    # Marshal the arguments into ctypes and push; 0 indicates success.
    return dnaserv_dll.DnaAddAnalogShortIdRecordNoStatus(
        c_char_p(site_service.encode('utf-8')),
        c_char_p(tag.encode('utf-8')),
        c_long(int(time_value)),
        c_double(value))
def AddDigitalShortIdRecord(site_service, tag, time_value, value,
        status_string="OK ", warn=False, chattering=False,
        unreliable=False, manual=False):
    """
    This function will add a digital value to the specified eDNA service and
    tag, including all default point status definitions.

    :param site_service: The site.service where data will be pushed
    :param tag: The eDNA tag to push data. Tag only (e.g. ADE1CA01)
    :param time_value: The time of the point, which MUST be in UTC Epoch
        format. For example, "1483926416" not "2016/01/01 01:01:01".
    :param value: should be either TRUE or FALSE
    :param status_string: the status text; it is normalized to exactly 16
        characters (space-padded or truncated) as required by the API
    :param warn: TRUE if the point is in a warning state
    :param chattering: TRUE if the point is in a chattering state
    :param unreliable: TRUE if the point is in an unreliable state
    :param manual: TRUE if the point was manually set
    :return: 0, if the data push is successful
    """
    # Define all required variables in the correct ctypes format
    szService = c_char_p(site_service.encode('utf-8'))
    szPointId = c_char_p(tag.encode('utf-8'))
    tTime = c_long(int(time_value))
    # The eDNA API requires the status text to be EXACTLY 16 characters:
    # pad short strings with spaces and truncate longer ones (resolves the
    # old TODO about checking/converting the string length).
    szStatus = create_string_buffer(status_string.ljust(16)[:16].encode('utf-8'))
    bSet = c_int(int(value))
    bDigitalWarning = c_int(int(warn))
    bDigitalChattering = c_int(int(chattering))
    bUnreliable = c_int(int(unreliable))
    bManual = c_int(int(manual))
    # Try to push the data. Function will return 0 if successful.
    nRet = dnaserv_dll.DnaAddDigitalShortIdRecord(szService, szPointId,
        tTime, bSet, szStatus, bDigitalWarning, bDigitalChattering,
        bUnreliable, bManual)
    return nRet
def AddAnalogShortIdMsecRecord(site_service, tag, time_value, msec, value,
        low_warn=False, high_warn=False, low_alarm=False, high_alarm=False,
        oor_low=False, oor_high=False, unreliable=False, manual=False):
    """
    Push a single analog value to an eDNA service/tag with millisecond time
    resolution, with optional status flags.

    :param site_service: The site.service where data will be pushed
    :param tag: The eDNA tag to push data. Tag only (e.g. ADE1CA01)
    :param time_value: The time of the point, which MUST be in UTC Epoch
        format. For example, "1483926416" not "2016/01/01 01:01:01".
    :param msec: The additional milliseconds for the time_value
    :param value: The value associated with the above time.
    :param low_warn: TRUE if the point is in a low warning state
    :param high_warn: TRUE if the point is in a high warning state
    :param low_alarm: TRUE if the point is in a low alarm state
    :param high_alarm: TRUE if the point is in a high alarm state
    :param oor_low: TRUE if the point is out-of-range low
    :param oor_high: TRUE if the point is out-of-range high
    :param unreliable: TRUE if the point is unreliable
    :param manual: TRUE if the point is manually set
    :return: 0, if the data push is successful
    """
    # Convert every boolean status flag to a ctypes int, in the exact
    # argument order expected by the DLL; the msec field comes last.
    args = [c_char_p(site_service.encode('utf-8')),
            c_char_p(tag.encode('utf-8')),
            c_long(int(time_value)),
            c_double(value)]
    args += [c_int(int(flag)) for flag in
             (low_warn, high_warn, low_alarm, high_alarm,
              oor_low, oor_high, unreliable, manual)]
    args.append(c_ushort(msec))
    # A return value of 0 indicates a successful push.
    return dnaserv_dll.DnaAddAnalogShortIdMsecRecord(*args)
def AddAnalogShortIdMsecRecordNoStatus(site_service, tag, time_value, msec,
                                       value):
    """
    Push a single analog value (without any status flags) to an eDNA
    service/tag, with millisecond time resolution.

    :param site_service: The site.service where data will be pushed
    :param tag: The eDNA tag to push data. Tag only (e.g. ADE1CA01)
    :param time_value: The time of the point, which MUST be in UTC Epoch
        format. For example, "1483926416" not "2016/01/01 01:01:01".
    :param msec: The additional milliseconds for the time_value
    :param value: The value associated with the above time.
    :return: 0, if the data push is successful
    """
    # Marshal the Python values into the ctypes signature expected by the
    # DLL; a return value of 0 indicates a successful push.
    return dnaserv_dll.DnaAddAnalogShortIdMsecRecordNoStatus(
        c_char_p(site_service.encode('utf-8')),
        c_char_p(tag.encode('utf-8')),
        c_long(int(time_value)),
        c_double(value),
        c_ushort(msec))
def AddDigitalShortIdMsecRecord(site_service, tag, time_value, msec,
        value, status_string="OK ", warn=False, chattering=False,
        unreliable=False, manual=False):
    """
    This function will add a digital value to the specified eDNA service and
    tag with millisecond resolution, including all default point status
    definitions.

    :param site_service: The site.service where data will be pushed
    :param tag: The eDNA tag to push data. Tag only (e.g. ADE1CA01)
    :param time_value: The time of the point, which MUST be in UTC Epoch
        format. For example, "1483926416" not "2016/01/01 01:01:01".
    :param msec: The additional milliseconds for the time_value
    :param value: should be either TRUE or FALSE
    :param status_string: the status text; it is normalized to exactly 16
        characters (space-padded or truncated) as required by the API
    :param warn: TRUE if the point is in a warning state
    :param chattering: TRUE if the point is in a chattering state
    :param unreliable: TRUE if the point is in an unreliable state
    :param manual: TRUE if the point was manually set
    :return: 0, if the data push is successful
    """
    # Define all required variables in the correct ctypes format
    szService = c_char_p(site_service.encode('utf-8'))
    szPointId = c_char_p(tag.encode('utf-8'))
    tTime = c_long(int(time_value))
    # The eDNA API requires the status text to be EXACTLY 16 characters:
    # pad short strings with spaces and truncate longer ones (same
    # normalization as AddDigitalShortIdRecord).
    szStatus = create_string_buffer(status_string.ljust(16)[:16].encode('utf-8'))
    bSet = c_int(int(value))
    bDigitalWarning = c_int(int(warn))
    bDigitalChattering = c_int(int(chattering))
    bUnreliable = c_int(int(unreliable))
    bManual = c_int(int(manual))
    usMsec = c_ushort(msec)
    # Try to push the data. Function will return 0 if successful.
    nRet = dnaserv_dll.DnaAddDigitalShortIdMsecRecord(szService, szPointId,
        tTime, bSet, szStatus, bDigitalWarning, bDigitalChattering,
        bUnreliable, bManual, usMsec)
    return nRet
# At the end of the module, we need to check that at least one eDNA service
# is connected. Otherwise, there is a problem with the eDNA connection.
# GetServices returns a pandas DataFrame; an empty frame means no services.
service_array = dna.GetServices()
num_services = 0
if not service_array.empty:
    # One row per connected service; report the count to the user.
    num_services = str(len(service_array))
    print("Successfully connected to " + num_services + " eDNA services.")
# Cleanup the unnecessary variables so they do not leak into the public
# module namespace.
del(service_array, num_services, default_location)
|
def find_latex_font_serif():
    r'''
    Find an available font to mimic LaTeX, and return its name (or ``None``
    when no candidate font is installed).
    '''
    import os, re
    import matplotlib.font_manager

    def fontname(path):
        # file name without extension; keep only the part before ' - '
        return os.path.splitext(os.path.split(path)[-1])[0].split(' - ')[0]

    # candidate patterns, ordered from most to least preferred
    preferences = [
        r'.*Computer\ Modern\ Roman.*',
        r'.*CMU\ Serif.*',
        r'.*CMU.*',
        r'.*Times.*',
        r'.*DejaVu.*',
        r'.*Serif.*',
    ]

    available = matplotlib.font_manager.findSystemFonts(fontpaths=None, fontext='ttf')

    # return the first installed font that matches the best pattern
    for pattern in preferences:
        for path in available:
            if re.match(pattern, path):
                return fontname(path)

    return None
Find an available font to mimic LaTeX, and return its name. | train | https://github.com/tdegeus/GooseMPL/blob/16e1e06cbcf7131ac98c03ca7251ce83734ef905/GooseMPL/__init__.py#L25-L51 | [
"name = lambda font: os.path.splitext(os.path.split(font)[-1])[0].split(' - ')[0]\n"
] | '''
This module provides some extensions to matplotlib.
:dependencies:
* numpy
* matplotlib
:copyright:
| Tom de Geus
| tom@geus.me
| http://www.geus.me
'''
# ==================================================================================================
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
import os,re,sys
# ==================================================================================================
# --------------------------------------------------------------------------------------------------
def copy_style():
    r'''
    Write all goose-styles to the relevant matplotlib configuration
    directory (``<configdir>/stylelib``), so they can be used with
    ``plt.style.use(...)``.
    '''
    import os
    import matplotlib

    # style definitions
    # -----------------

    styles = {}

    styles['goose.mplstyle'] = '''
figure.figsize : 8,6
font.weight : normal
font.size : 16
axes.labelsize : medium
axes.titlesize : medium
xtick.labelsize : small
ytick.labelsize : small
xtick.top : True
ytick.right : True
axes.facecolor : none
axes.prop_cycle : cycler('color',['k', 'r', 'g', 'b', 'y', 'c', 'm'])
legend.fontsize : medium
legend.fancybox : true
legend.columnspacing : 1.0
legend.handletextpad : 0.2
lines.linewidth : 2
image.cmap : afmhot
image.interpolation : nearest
image.origin : lower
savefig.facecolor : none
figure.autolayout : True
errorbar.capsize : 2
'''

    styles['goose-tick-in.mplstyle'] = '''
xtick.direction : in
ytick.direction : in
'''

    styles['goose-tick-lower.mplstyle'] = '''
xtick.top : False
ytick.right : False
axes.spines.top : False
axes.spines.right : False
'''

    # look up the LaTeX-like font only once (it scans the system font list)
    serif = find_latex_font_serif()

    if serif is not None:
        styles['goose-latex.mplstyle'] = r'''
font.family : serif
font.serif : {serif:s}
font.weight : bold
font.size : 18
text.usetex : true
text.latex.preamble : \usepackage{{amsmath}},\usepackage{{amsfonts}},\usepackage{{amssymb}},\usepackage{{bm}}
'''.format(serif=serif)
    else:
        # NOTE(fix): this branch is NOT passed through str.format, so the
        # braces must be single; the old doubled braces ('{{amsmath}}')
        # ended up literally in the written style file.
        styles['goose-latex.mplstyle'] = r'''
font.family : serif
font.weight : bold
font.size : 18
text.usetex : true
text.latex.preamble : \usepackage{amsmath},\usepackage{amsfonts},\usepackage{amssymb},\usepackage{bm}
'''

    # write style definitions
    # -----------------------

    # directory name where the styles are stored
    dirname = os.path.abspath(os.path.join(matplotlib.get_configdir(), 'stylelib'))

    # make directory if it does not yet exist
    if not os.path.isdir(dirname):
        os.makedirs(dirname)

    # write all styles; 'with' guarantees the file handles are closed
    for fname, style in styles.items():
        with open(os.path.join(dirname, fname), 'w') as handle:
            handle.write(style)
# ==================================================================================================
def set_decade_lims(axis=None, direction=None):
    r'''
    Round the axis limits outward to whole decades (powers of ten).

    :options:

      **axis** ([``plt.gca()``] | ...)
        Specify the axis to which to apply the limits.

      **direction** ([``None``] | ``'x'`` | ``'y'``)
        Limit the application to a certain direction (default: both).
    '''
    if axis is None:
        axis = plt.gca()

    # floor/ceil the current limits to the nearest enclosing decade
    if direction in (None, 'x'):
        lo, hi = axis.get_xlim()
        axis.set_xlim([10 ** np.floor(np.log10(lo)),
                       10 ** np.ceil(np.log10(hi))])

    if direction in (None, 'y'):
        lo, hi = axis.get_ylim()
        axis.set_ylim([10 ** np.floor(np.log10(lo)),
                       10 ** np.ceil(np.log10(hi))])
# ==================================================================================================
def scale_lim(lim, factor=1.05):
    r'''
    Scale limits to be a fraction wider (default 5%), to have a nice plot.

    :arguments:

      **lim** (``<list>`` | ``<str>``)
        The limits. May be a string "[...,...]", which is converted to a list.

    :options:

      **factor** ([``1.05``] | ``<float>``)
        Scale factor.
    '''
    # a string such as "[0, 1]" is evaluated to the corresponding list
    if type(lim) == str:
        lim = eval(lim)

    # widen each side by half of the extra width
    pad = (factor - 1.) / 2. * (lim[1] - lim[0])
    lim[0] -= pad
    lim[1] += pad

    return lim
# ==================================================================================================
def abs2rel_x(x, axis=None):
    r'''
    Transform absolute x-coordinates to relative x-coordinates (a fraction
    of the x-axis). Be sure to set the limits and scale before calling!

    :arguments:

      **x** (``float``, ``list``)
        Absolute coordinates.

    :options:

      **axis** ([``plt.gca()``] | ...)
        Specify the axis from which to take the limits.

    :returns:

      **x** (``float``, ``list``)
        Relative coordinates.
    '''
    if axis is None:
        axis = plt.gca()

    lo, hi = axis.get_xlim()

    if axis.get_xscale() == 'log':
        # interpolate in log-space; list entries equal to 'None' are passed
        # through, a scalar input falls back to the scalar expression
        try:
            return [(np.log10(v) - np.log10(lo)) / (np.log10(hi) - np.log10(lo))
                    if v is not None else v for v in x]
        except:
            return (np.log10(x) - np.log10(lo)) / (np.log10(hi) - np.log10(lo))

    # linear interpolation
    try:
        return [(v - lo) / (hi - lo) if v is not None else v for v in x]
    except:
        return (x - lo) / (hi - lo)
# ==================================================================================================
def abs2rel_y(y, axis=None):
    r'''
    Transform absolute y-coordinates to relative y-coordinates. Relative
    coordinates correspond to a fraction of the relevant axis. Be sure to set
    the limits and scale before calling this function!

    :arguments:

      **y** (``float``, ``list``)
        Absolute coordinates.

    :options:

      **axis** ([``plt.gca()``] | ...)
        Specify the axis to which to apply the limits.

    :returns:

      **y** (``float``, ``list``)
        Relative coordinates.
    '''
    # get current axis
    if axis is None:
        axis = plt.gca()

    # get current limits
    ymin, ymax = axis.get_ylim()

    # transform
    # - log scale
    #   NOTE(fix): this previously tested axis.get_xscale(), i.e. the X-axis
    #   scale, to decide how to transform Y-coordinates.
    if axis.get_yscale() == 'log':
        # 'None' entries pass through; a scalar falls back to the except-path
        try: return [(np.log10(i)-np.log10(ymin))/(np.log10(ymax)-np.log10(ymin)) if i is not None else i for i in y]
        except: return (np.log10(y)-np.log10(ymin))/(np.log10(ymax)-np.log10(ymin))
    # - normal scale
    else:
        try: return [(i-ymin)/(ymax-ymin) if i is not None else i for i in y]
        except: return (y-ymin)/(ymax-ymin)
# ==================================================================================================
def rel2abs_x(x, axis=None):
    r'''
    Transform relative x-coordinates (a fraction of the x-axis) to absolute
    x-coordinates. Be sure to set the limits and scale before calling!

    :arguments:

      **x** (``float``, ``list``)
        Relative coordinates.

    :options:

      **axis** ([``plt.gca()``] | ...)
        Specify the axis from which to take the limits.

    :returns:

      **x** (``float``, ``list``)
        Absolute coordinates.
    '''
    if axis is None:
        axis = plt.gca()

    lo, hi = axis.get_xlim()

    if axis.get_xscale() == 'log':
        # interpolate in log-space; list entries equal to 'None' are passed
        # through, a scalar input falls back to the scalar expression
        try:
            return [10.**(np.log10(lo) + v * (np.log10(hi) - np.log10(lo)))
                    if v is not None else v for v in x]
        except:
            return 10.**(np.log10(lo) + x * (np.log10(hi) - np.log10(lo)))

    # linear interpolation
    try:
        return [lo + v * (hi - lo) if v is not None else v for v in x]
    except:
        return lo + x * (hi - lo)
# ==================================================================================================
def rel2abs_y(y, axis=None):
    r'''
    Transform relative y-coordinates to absolute y-coordinates. Relative
    coordinates correspond to a fraction of the relevant axis. Be sure to set
    the limits and scale before calling this function!

    :arguments:

      **y** (``float``, ``list``)
        Relative coordinates.

    :options:

      **axis** ([``plt.gca()``] | ...)
        Specify the axis to which to apply the limits.

    :returns:

      **y** (``float``, ``list``)
        Absolute coordinates.
    '''
    # get current axis
    if axis is None:
        axis = plt.gca()

    # get current limits
    ymin, ymax = axis.get_ylim()

    # transform
    # - log scale
    #   NOTE(fix): this previously tested axis.get_xscale(), i.e. the X-axis
    #   scale, to decide how to transform Y-coordinates.
    if axis.get_yscale() == 'log':
        # 'None' entries pass through; a scalar falls back to the except-path
        try: return [10.**(np.log10(ymin)+i*(np.log10(ymax)-np.log10(ymin))) if i is not None else i for i in y]
        except: return 10.**(np.log10(ymin)+y*(np.log10(ymax)-np.log10(ymin)))
    # - normal scale
    else:
        try: return [ymin+i*(ymax-ymin) if i is not None else i for i in y]
        except: return ymin+y*(ymax-ymin)
# ==================================================================================================
def subplots(scale_x=None, scale_y=None, scale=None, **kwargs):
    r'''
    Run ``matplotlib.pyplot.subplots`` with ``figsize`` set to the correct
    multiple of the default.

    :additional options:

      **scale, scale_x, scale_y** (``<float>``)
        Scale the figure-size (along one of the dimensions).
    '''
    # an explicit figsize wins: defer to pyplot untouched
    if 'figsize' in kwargs:
        return plt.subplots(**kwargs)

    w, h = mpl.rcParams['figure.figsize']

    # apply the global scale first, then the per-dimension scales
    if scale is not None:
        w, h = w * scale, h * scale
    if scale_x is not None:
        w *= scale_x
    if scale_y is not None:
        h *= scale_y

    # grow the figure with the number of sub-plots
    nrows = kwargs.pop('nrows', 1)
    ncols = kwargs.pop('ncols', 1)

    return plt.subplots(nrows=nrows, ncols=ncols,
                        figsize=(ncols * w, nrows * h), **kwargs)
# ==================================================================================================
def plot(x, y, units='absolute', axis=None, **kwargs):
    r'''
    Plot.

    :arguments:

      **x, y** (``list``)
        Coordinates.

    :options:

      **units** ([``'absolute'``] | ``'relative'``)
        Unit-type of the coordinates: relative coordinates are a fraction of
        the relevant axis (set the limits and scale beforehand!).

      **axis** ([``plt.gca()``] | ...)
        The axis to draw on.

      ...
        Any ``plt.plot(...)`` option.

    :returns:

      The handle of the ``plt.plot(...)`` command.
    '''
    target = plt.gca() if axis is None else axis

    # convert relative -> absolute coordinates when requested
    if units.lower() == 'relative':
        x, y = rel2abs_x(x, target), rel2abs_y(y, target)

    return target.plot(x, y, **kwargs)
# ==================================================================================================
def text(x, y, text, units='absolute', axis=None, **kwargs):
    r'''
    Plot a text.

    :arguments:

      **x, y** (``float``)
        Coordinates.

      **text** (``str``)
        Text to plot.

    :options:

      **units** ([``'absolute'``] | ``'relative'``)
        Unit-type of the coordinates: relative coordinates are a fraction of
        the relevant axis (set the limits and scale beforehand!).

      **axis** ([``plt.gca()``] | ...)
        The axis to draw on.

      ...
        Any ``plt.text(...)`` option.

    :returns:

      The handle of the ``plt.text(...)`` command.
    '''
    target = plt.gca() if axis is None else axis

    # convert relative -> absolute coordinates when requested
    if units.lower() == 'relative':
        x, y = rel2abs_x(x, target), rel2abs_y(y, target)

    return target.text(x, y, text, **kwargs)
# ==================================================================================================
def diagonal_powerlaw(exp, ll=None, lr=None, tl=None, tr=None, width=None, height=None, plot=False, **kwargs):
    r'''
    Set the limits such that a power-law with a certain exponent lies on the
    diagonal.

    :arguments:

      **exp** (``<float>``)
        The power-law exponent.

      **ll, lr, tl, tr** (``<list>``)
        Coordinates of the lower-left, or the lower-right, or the top-left,
        or the top-right corner (specify exactly one).

      **width, height** (``<float>``)
        Width or the height (specify exactly one).

    :options:

      **axis** ([``plt.gca()``] | ...)
        Specify the axis to which to apply the limits.

      **plot** ([``False``] | ``True``)
        Plot the diagonal.

      ...
        Any ``plt.plot(...)`` option.

    :returns:

      The handle of the ``plt.plot(...)`` command (if any).
    '''
    # NOTE(fix): resolve the axis lazily; 'kwargs.pop("axis", plt.gca())'
    # evaluated plt.gca() unconditionally, creating a figure/axis even when
    # the caller supplied one.
    axis = kwargs.pop('axis', None)
    if axis is None:
        axis = plt.gca()

    # work in log-space: on a log-log plot a power-law is a straight line
    if width and not height: width = np.log(width)
    elif height and not width: height = np.log(height)
    else: raise IOError('Specify "width" or "height"')

    if ll and not lr and not tl and not tr: ll = [np.log(ll[0]), np.log(ll[1])]
    elif lr and not ll and not tl and not tr: lr = [np.log(lr[0]), np.log(lr[1])]
    elif tl and not lr and not ll and not tr: tl = [np.log(tl[0]), np.log(tl[1])]
    elif tr and not lr and not tl and not ll: tr = [np.log(tr[0]), np.log(tr[1])]
    else: raise IOError('Specify "ll" or "lr" or "tl" or "tr"')

    axis.set_xscale('log')
    axis.set_yscale('log')

    # the exponent fixes the aspect ratio of the (log-space) bounding box
    if width: height = width * np.abs(exp)
    elif height: width = height / np.abs(exp)

    # set the limits from the anchor corner and the box size
    if ll:
        axis.set_xlim(sorted([np.exp(ll[0]), np.exp(ll[0] + width)]))
        axis.set_ylim(sorted([np.exp(ll[1]), np.exp(ll[1] + height)]))
    elif lr:
        axis.set_xlim(sorted([np.exp(lr[0]), np.exp(lr[0] - width)]))
        axis.set_ylim(sorted([np.exp(lr[1]), np.exp(lr[1] + height)]))
    elif tl:
        axis.set_xlim(sorted([np.exp(tl[0]), np.exp(tl[0] + width)]))
        axis.set_ylim(sorted([np.exp(tl[1]), np.exp(tl[1] - height)]))
    elif tr:
        axis.set_xlim(sorted([np.exp(tr[0]), np.exp(tr[0] - width)]))
        axis.set_ylim(sorted([np.exp(tr[1]), np.exp(tr[1] - height)]))

    if plot:
        if exp > 0: return plot_powerlaw(exp, 0., 0., 1., **kwargs)
        else: return plot_powerlaw(exp, 0., 1., 1., **kwargs)
# ==================================================================================================
def annotate_powerlaw(text, exp, startx, starty, width=None, rx=0.5, ry=0.5, **kwargs):
    r'''
    Added a label to the middle of a power-law annotation (see
    ``goosempl.plot_powerlaw``).

    :arguments:

      **text** (``str``)
        The label text.

      **exp** (``float``)
        The power-law exponent.

      **startx, starty** (``float``)
        Start coordinates.

    :options:

      **width, height, endx, endy** (``float``)
        Definition of the end coordinate (only one of these options is needed).

      **rx, ry** (``float``)
        Relative position of the label between the start and end points.

      **units** ([``'relative'``] | ``'absolute'``)
        Unit-type of the coordinates: relative coordinates are a fraction of
        the relevant axis (set the limits and scale beforehand!).

      **axis** ([``plt.gca()``] | ...)
        Specify the axis to which to apply the limits.

      ...
        Any ``plt.text(...)`` option.

    :returns:

      The handle of the ``plt.text(...)`` command.
    '''
    # get options/defaults
    endx = kwargs.pop('endx', None)
    endy = kwargs.pop('endy', None)
    height = kwargs.pop('height', None)
    units = kwargs.pop('units', 'relative')
    # NOTE(fix): resolve the axis lazily; 'kwargs.pop("axis", plt.gca())'
    # evaluated plt.gca() unconditionally, creating a figure/axis even when
    # the caller supplied one.
    axis = kwargs.pop('axis', None)
    if axis is None:
        axis = plt.gca()

    # a power-law is only a straight line on a log-log plot
    if axis.get_xscale() != 'log' or axis.get_yscale() != 'log':
        raise IOError('This function only works on a log-log scale, where the power-law is a straight line')

    # convert width/height to an end coordinate
    if width is not None:
        endx = startx + width
        endy = None
    elif height is not None:
        if exp > 0: endy = starty + height
        elif exp == 0: endy = starty
        else: endy = starty - height
        endx = None

    # transform relative -> absolute coordinates
    if units.lower() == 'relative':
        [startx, endx] = rel2abs_x([startx, endx], axis)
        [starty, endy] = rel2abs_y([starty, endy], axis)

    # determine multiplication constant
    const = starty / (startx ** exp)

    # get the missing end x/y-coordinate from the power-law itself
    if endx is not None: endy = const * endx ** exp
    else: endx = (endy / const) ** (1 / exp)

    # label position: interpolate in log-space between start and end
    x = 10. ** (np.log10(startx) + rx * (np.log10(endx) - np.log10(startx)))
    y = 10. ** (np.log10(starty) + ry * (np.log10(endy) - np.log10(starty)))

    # plot
    return axis.text(x, y, text, **kwargs)
# ==================================================================================================
def plot_powerlaw(exp, startx, starty, width=None, **kwargs):
    r'''
    Plot a power-law.

    :arguments:

      **exp** (``float``)
        The power-law exponent.

      **startx, starty** (``float``)
        Start coordinates.

    :options:

      **width, height, endx, endy** (``float``)
        Definition of the end coordinate (only one of these options is needed).

      **units** ([``'relative'``] | ``'absolute'``)
        Unit-type of the coordinates: relative coordinates are a fraction of
        the relevant axis (set the limits and scale beforehand!).

      **axis** ([``plt.gca()``] | ...)
        Specify the axis to which to apply the limits.

      ...
        Any ``plt.plot(...)`` option.

    :returns:

      The handle of the ``plt.plot(...)`` command.
    '''
    # get options/defaults
    endx = kwargs.pop('endx', None)
    endy = kwargs.pop('endy', None)
    height = kwargs.pop('height', None)
    units = kwargs.pop('units', 'relative')
    # NOTE(fix): resolve the axis lazily; 'kwargs.pop("axis", plt.gca())'
    # evaluated plt.gca() unconditionally, creating a figure/axis even when
    # the caller supplied one.
    axis = kwargs.pop('axis', None)
    if axis is None:
        axis = plt.gca()

    # a power-law is only a straight line on a log-log plot
    if axis.get_xscale() != 'log' or axis.get_yscale() != 'log':
        raise IOError('This function only works on a log-log scale, where the power-law is a straight line')

    # convert width/height to an end coordinate
    if width is not None:
        endx = startx + width
        endy = None
    elif height is not None:
        if exp > 0: endy = starty + height
        elif exp == 0: endy = starty
        else: endy = starty - height
        endx = None

    # transform relative -> absolute coordinates
    if units.lower() == 'relative':
        [startx, endx] = rel2abs_x([startx, endx], axis)
        [starty, endy] = rel2abs_y([starty, endy], axis)

    # determine multiplication constant
    const = starty / (startx ** exp)

    # get the missing end x/y-coordinate from the power-law itself
    if endx is not None: endy = const * endx ** exp
    else: endx = (endy / const) ** (1 / exp)

    # plot
    return axis.plot([startx, endx], [starty, endy], **kwargs)
# ==================================================================================================
def grid_powerlaw(exp, insert=0, skip=0, end=-1, step=0, axis=None, **kwargs):
    r'''
    Draw a power-law grid: grid lines that all have slope ``exp`` on a log-log axis,
    starting from the tick positions.

    :arguments:

        **exp** (``float``)
            The power-law exponent.

    :options:

        **insert** (``<int>``)
            Insert extra lines in between the default lines set by the tick positions.

        **skip, end, step** (``<int>``)
            Select from the lines based on ``coor = coor[skip:end:step]``.

        **axis** ([``plt.gca()``] | ...)
            Specify the axis to which to apply the limits.

        ...
            Any ``plt.plot(...)`` option.

    :returns:
        The handle of the ``plt.plot(...)`` command.
    '''

    if axis is None:
        axis = plt.gca()

    # defaults: thin dashed black lines
    kwargs.setdefault('color'    , 'k' )
    kwargs.setdefault('linestyle', '--')
    kwargs.setdefault('linewidth', 1   )

    # a power-law is only a straight line on a log-log plot
    if axis.get_xscale() != 'log' or axis.get_yscale() != 'log':
        raise IOError('This function only works on a log-log scale, where the power-law is a straight line')

    if exp == 0:

        # zero exponent: horizontal lines through the y-ticks (relative coordinates)
        y0 = abs2rel_y(axis.get_yticks(), axis=axis)

        # densify with 'insert' extra lines between each pair of ticks
        if insert > 0:
            npts   = len(y0)
            fine   = np.linspace(0, 1, npts + (npts - 1) * int(insert))
            coarse = np.linspace(0, 1, npts)
            y0     = np.interp(fine, coarse, y0)

        # select a subset of the lines
        y0 = y0[int(skip):int(end):int(1 + step)]

        # horizontal: same y at both ends, x spans the whole axis
        y1 = y0
        x0 = np.zeros((len(y0)))
        x1 = np.ones ((len(y0)))

    else:

        # axis size in decades, in both directions
        xmin, xmax = axis.get_xlim()
        ymin, ymax = axis.get_ylim()
        ndecx = np.log10(xmax) - np.log10(xmin)
        ndecy = np.log10(ymax) - np.log10(ymin)

        # exponent converted to a slope in relative (axis-fraction) coordinates
        slope = np.abs(exp) * ndecx / ndecy

        # x-coordinates of the line starts: the x-ticks (relative coordinates)
        x0 = abs2rel_x(axis.get_xticks(), axis=axis)

        # number of extra starting points to prepend left of the axis, so that
        # lines entering from the left edge are also drawn
        spacing = x0[1] - x0[0]
        nprep   = int(np.floor(1. / (slope * spacing))) - 1

        # add one extra to be on the safe side
        if insert > 0:
            nprep += 1

        if nprep > 0:
            x0 = np.hstack((x0[0] + np.cumsum(-spacing * np.ones((nprep)))[::-1], x0))

        # densify with 'insert' extra lines between each pair of starts
        if insert > 0:
            npts   = len(x0)
            fine   = np.linspace(0, 1, npts + (npts - 1) * int(insert))
            coarse = np.linspace(0, 1, npts)
            x0     = np.interp(fine, coarse, x0)

        # select a subset of the lines
        if step > 0:
            x0 = x0[int(skip)::int(1 + step)]

        # end x-coordinate: each line spans the full y-range
        x1 = x0 + 1 / slope

        # lines go bottom-to-top for positive exponents, top-to-bottom otherwise
        if exp > 0:
            y0 = np.zeros((len(x0)))
            y1 = np.ones ((len(x0)))
        else:
            y0 = np.ones ((len(x0)))
            y1 = np.zeros((len(x0)))

    # relative -> absolute coordinates
    x0 = rel2abs_x(x0, axis)
    x1 = rel2abs_x(x1, axis)
    y0 = rel2abs_y(y0, axis)
    y1 = rel2abs_y(y1, axis)

    # draw all lines in one call
    lines = plt.gca().plot if False else axis.plot
    lines = axis.plot(np.vstack((x0, x1)), np.vstack((y0, y1)), **kwargs)

    # keep only one legend entry for the whole grid
    plt.setp(lines[1:], label="_")

    return lines
# ==================================================================================================
def histogram_bin_edges_minwidth(min_width, bins):
    r'''
    Merge bins with their right neighbour until every bin is at least
    ``min_width`` wide.

    :arguments:

        **bins** (``<array_like>``)
            The bin-edges.

        **min_width** (``<float>``)
            The minimum bin width (``None``/``False`` disables merging).
    '''

    # merging disabled: hand the edges back untouched
    if min_width is None or min_width is False:
        return bins

    # repeatedly drop the inner edge of the first too-narrow bin
    while True:

        narrow = np.where(np.diff(bins) < min_width)[0]

        if narrow.size == 0:
            return bins

        first = narrow[0]

        if first + 1 == len(bins) - 1:
            # narrow bin is the last one: merge it into its left neighbour
            bins = np.hstack((bins[:first], bins[-1]))
        else:
            # merge with the right neighbour by removing the shared edge
            bins = np.hstack((bins[:first + 1], bins[first + 2:]))
# ==================================================================================================
def histogram_bin_edges_mincount(data, min_count, bins):
    r'''
    Merge bins with their right neighbour until every bin holds at least
    ``min_count`` data-points.

    :arguments:

        **data** (``<array_like>``)
            Input data. The histogram is computed over the flattened array.

        **bins** (``<array_like>`` | ``<int>``)
            The bin-edges (or the number of bins, automatically converted to equal-sized bins).

        **min_count** (``<int>``)
            The minimum number of data-points per bin (``None``/``False`` disables merging).

    :raises:
        IOError: if ``min_count`` is not an integer.
    '''

    # escape: merging disabled
    if min_count is None : return bins
    if min_count is False: return bins

    # FIX: idiomatic 'isinstance' instead of 'type(...) != int'; bools are
    # still rejected (as the original 'type' comparison did).
    if isinstance(min_count, bool) or not isinstance(min_count, int):
        raise IOError('"min_count" must be an integer number')

    # keep removing edges where needed
    while True:

        P, _ = np.histogram(data, bins=bins, density=False)

        idx = np.where(P < min_count)[0]

        if len(idx) == 0:
            return bins

        idx = idx[0]

        if idx + 1 == len(P):
            # under-filled bin is the last one: merge into its left neighbour
            bins = np.hstack((bins[:(idx)], bins[-1]))
        else:
            # merge with the right neighbour by removing the shared edge
            bins = np.hstack((bins[:(idx + 1)], bins[(idx + 2):]))
# ==================================================================================================
def histogram_bin_edges(data, bins=10, mode='equal', min_count=None, integer=False, remove_empty_edges=True, min_width=None):
    r'''
    Determine bin-edges.

    :arguments:

        **data** (``<array_like>``)
            Input data. The histogram is computed over the flattened array.

    :options:

        **bins** ([``10``] | ``<int>``)
            The number of bins.

        **mode** ([``'equal'``] | ``<str>``)
            Mode with which to compute the bin-edges:
            * ``'equal'``: each bin has equal width.
            * ``'log'``: logarithmic spacing.
            * ``'uniform'``: uniform number of data-points per bin.

        **min_count** (``<int>``)
            The minimum number of data-points per bin.

        **min_width** (``<float>``)
            The minimum width of each bin.

        **integer** ([``False``] | ``True``)
            If ``True``, bins not encompassing an integer are removed
            (e.g. a bin with edges ``[1.1, 1.9]`` is removed, but ``[0.9, 1.1]`` is not).

        **remove_empty_edges** ([``True``] | ``False``)
            Remove empty bins at the beginning or the end.

    :returns:

        **bin_edges** (``<array of dtype float>``)
            The edges to pass into histogram.
    '''

    # determine the bin-edges
    if mode == 'equal':

        bin_edges = np.linspace(np.min(data), np.max(data), bins + 1)

    elif mode == 'log':

        bin_edges = np.logspace(np.log10(np.min(data)), np.log10(np.max(data)), bins + 1)

    elif mode == 'uniform':

        # - check
        if hasattr(bins, "__len__"):
            raise IOError('Only the number of bins can be specified')

        # - use the minimum count to estimate the number of bins
        if min_count is not None and min_count is not False:
            if not isinstance(min_count, int):
                raise IOError('"min_count" must be an integer number')
            bins = int(np.floor(float(len(data)) / float(min_count)))

        # - number of data-points in each bin (equal for each)
        count = int(np.floor(float(len(data)) / float(bins))) * np.ones(bins, dtype='int')

        # - increase the number of data-points by one in as many bins as needed,
        #   such that the total fits the total number of data-points
        # BUG FIX: '.astype(np.int)' -> '.astype(int)'; 'np.int' was removed in
        # NumPy 1.24, making this line raise AttributeError.
        count[np.linspace(0, bins - 1, len(data) - np.sum(count)).astype(int)] += 1

        # - split the data
        idx = np.empty((bins + 1), dtype='int')
        idx[0 ] = 0
        idx[1:] = np.cumsum(count)
        idx[-1] = len(data) - 1

        # - determine the bin-edges
        bin_edges = np.unique(np.sort(data)[idx])

    else:

        raise IOError('Unknown option')

    # remove empty starting and ending bin (related to an unfortunate choice of bin-edges)
    if remove_empty_edges:

        N, _ = np.histogram(data, bins=bin_edges, density=False)

        idx = np.min(np.where(N > 0)[0])
        jdx = np.max(np.where(N > 0)[0])

        bin_edges = bin_edges[(idx):(jdx + 2)]

    # merge bins with too few data-points (if needed); guarded so the helpers
    # are only invoked when merging is actually requested
    if min_count is not None and min_count is not False:
        bin_edges = histogram_bin_edges_mincount(data, min_count=min_count, bins=bin_edges)

    # merge bins that have too small of a width
    if min_width is not None and min_width is not False:
        bin_edges = histogram_bin_edges_minwidth(min_width=min_width, bins=bin_edges)

    # select only bins that encompass an integer (and retain the original bounds)
    if integer:
        idx = np.where(np.diff(np.floor(bin_edges)) >= 1)[0]
        idx = np.unique(np.hstack((0, idx, len(bin_edges) - 1)))
        bin_edges = bin_edges[idx]

    # return
    return bin_edges
# ==================================================================================================
def histogram(data, return_edges=True, **kwargs):
    r'''
    Compute a histogram; a thin wrapper around ``numpy.histogram``.

    See `numpy.histogram <https://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html>`_

    :extra options:

        **return_edges** ([``True``] | ``False``)
            Return the bin edges if set to ``True``, return their midpoints otherwise.
    '''

    # delegate the actual binning to NumPy
    counts, edges = np.histogram(data, **kwargs)

    if return_edges:
        return counts, edges

    # midpoint of each bin instead of the edges
    midpoints = np.diff(edges) / 2. + edges[:-1]

    return counts, midpoints
# ==================================================================================================
def histogram_cumulative(data, **kwargs):
    r'''
    Compute a cumulative histogram.

    See `numpy.histogram <https://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html>`_

    :extra options:

        **return_edges** ([``True``] | ``False``)
            Return the bin edges if set to ``True``, return their midpoints otherwise.

        **normalize** ([``False``] | ``True``)
            Normalize such that the final probability is one. In this case the function
            returns the (binned) cumulative probability density.
    '''

    # local options (must be stripped before forwarding to numpy)
    want_edges = kwargs.pop('return_edges', True)
    normalize  = kwargs.pop('normalize'   , False)

    counts, edges = np.histogram(data, **kwargs)

    # accumulate the per-bin counts
    counts = np.cumsum(counts)

    if normalize:
        counts = counts / counts[-1]

    if not want_edges:
        # replace edges by the midpoint of each bin
        edges = np.diff(edges) / 2. + edges[:-1]

    return counts, edges
# ==================================================================================================
def hist(P, edges, **kwargs):
    r'''
    Plot a histogram: one rectangular patch per bin.

    :arguments:

        **P** (``<array_like>``)
            Count (or density) per bin.

        **edges** (``<array_like>``)
            The bin-edges (one more entry than ``P``).

    :options:

        **axis** ([``plt.gca()``] | ...)
            Specify the axis to plot in.

        **cindex** (``<numpy.ndarray>``)
            Array with, for each patch, the value that should be indexed to a color.

        **autoscale** ([``True``] | ``False``)
            Automatically update the limits of the plot.

        ...
            Any ``matplotlib.collections.PatchCollection`` option.

    :returns:
        Handle of the ``PatchCollection``.
    '''

    from matplotlib.collections import PatchCollection
    from matplotlib.patches import Polygon

    # extract local options
    axis      = kwargs.pop('axis'     , None)
    cindex    = kwargs.pop('cindex'   , None)
    autoscale = kwargs.pop('autoscale', True)

    # BUG FIX: 'plt.gca()' used to be the eagerly-evaluated 'pop' default,
    # which creates a new figure even when an axis is supplied; resolve lazily.
    if axis is None:
        axis = plt.gca()

    # set defaults
    kwargs.setdefault('edgecolor', 'k')

    # no color-index -> set transparent
    if cindex is None:
        kwargs.setdefault('facecolor', (0., 0., 0., 0.))

    # one rectangle (as a Polygon) per bin
    poly = []
    for count, xl, xu in zip(P, edges[:-1], edges[1:]):
        coor = np.array([
            [xl, 0.   ],
            [xu, 0.   ],
            [xu, count],
            [xl, count],
        ])
        poly.append(Polygon(coor))

    # convert patches -> matplotlib-objects
    p = PatchCollection(poly, **kwargs)

    # add colors to patches
    if cindex is not None:
        p.set_array(cindex)

    # add patches to axis
    axis.add_collection(p)

    # rescale the axes manually (automatic limits of Collections are not supported)
    if autoscale:
        # - get limits
        xlim = [edges[0], edges[-1]]
        ylim = [0       , np.max(P)]
        # - set limits +/- 10% extra margin
        axis.set_xlim([xlim[0] - .1 * (xlim[1] - xlim[0]), xlim[1] + .1 * (xlim[1] - xlim[0])])
        axis.set_ylim([ylim[0] - .1 * (ylim[1] - ylim[0]), ylim[1] + .1 * (ylim[1] - ylim[0])])

    return p
# ==================================================================================================
def cdf(data, mode='continuous', **kwargs):
    '''
    Return the empirical cumulative density of ``data``.

    :arguments:

        **data** (``<numpy.ndarray>``)
            Input data, to plot the distribution for.

    :returns:

        **P** (``<numpy.ndarray>``)
            Cumulative probability (evenly spaced on ``[0, 1]``).

        **x** (``<numpy.ndarray>``)
            Data points, sorted ascending.
    '''

    # sort the samples; each sample carries an equal probability increment
    x = np.sort(data)
    P = np.linspace(0.0, 1.0, len(data))

    return (P, x)
# ==================================================================================================
def patch(*args, **kwargs):
    '''
    Add patches to plot. The color of the patches is indexed according to a specified
    color-index.

    :example:

        Plot a finite element mesh: the outline of the undeformed configuration, and the
        deformed configuration for which the elements get a color e.g. based on stress::

            import matplotlib.pyplot as plt
            import goosempl as gplt

            fig,ax = plt.subplots()

            p = gplt.patch(coor=coor+disp,conn=conn,axis=ax,cindex=stress,cmap='YlOrRd',edgecolor=None)
            _ = gplt.patch(coor=coor     ,conn=conn,axis=ax)

            cbar = fig.colorbar(p,axis=ax,aspect=10)

            plt.show()

    :arguments - option 1/2:

        **patches** (``<list>``)
            List with patch objects. Can be replaced by specifying ``coor`` and ``conn``.

    :arguments - option 2/2:

        **coor** (``<numpy.ndarray>`` | ``<list>`` (nested))
            Matrix with on each row the coordinates (positions) of each node.

        **conn** (``<numpy.ndarray>`` | ``<list>`` (nested))
            Matrix with on each row the node numbers (rows in ``coor``) which form an
            element (patch).

    :options:

        **cindex** (``<numpy.ndarray>``)
            Array with, for each patch, the value that should be indexed to a color.

        **axis** (``<matplotlib>``)
            Specify an axis to include to plot in. By default the current axis is used.

        **autoscale** ([``True``] | ``False``)
            Automatically update the limits of the plot (currently automatic limits of
            Collections are not supported by matplotlib). Only possible when ``coor``
            is given.

    :recommended options:

        **cmap**, **linewidth**, **edgecolor**, **clim**: see
        ``matplotlib.collections.PatchCollection``.

    :returns:

        **handle** (``<matplotlib>``)
            Handle of the patch objects.

    .. seealso::

        * `matplotlib example
          <http://matplotlib.org/examples/api/patch_collection.html>`_.
    '''

    from matplotlib.collections import PatchCollection
    from matplotlib.patches import Polygon

    # extract local options
    axis      = kwargs.pop('axis'     , None)
    cindex    = kwargs.pop('cindex'   , None)
    coor      = kwargs.pop('coor'     , None)
    conn      = kwargs.pop('conn'     , None)
    autoscale = kwargs.pop('autoscale', True)

    # BUG FIX: the old check rejected the documented option 1 (a plain list of
    # patches); only demand "coor"/"conn" when no patches were passed
    if len(args) == 0 and (coor is None or conn is None):
        raise IOError('Specify both "coor" and "conn"')

    # BUG FIX: 'plt.gca()' used to be the eagerly-evaluated 'pop' default,
    # which creates a new figure even when an axis is supplied; resolve lazily.
    if axis is None:
        axis = plt.gca()

    # set defaults
    kwargs.setdefault('edgecolor', 'k')

    # no color-index -> set transparent
    if cindex is None:
        kwargs.setdefault('facecolor', (0., 0., 0., 0.))

    # convert mesh -> list of Polygons
    # BUG FIX: was 'args = tuple(poly, *args)', a TypeError ('tuple' takes a
    # single iterable argument)
    if coor is not None and conn is not None:
        patches = [Polygon(coor[iconn, :]) for iconn in conn]
    elif len(args) == 1:
        patches = args[0]
    else:
        patches = list(args)

    # convert patches -> matplotlib-objects
    p = PatchCollection(patches, **kwargs)

    # add colors to patches
    if cindex is not None:
        p.set_array(cindex)

    # add patches to axis
    axis.add_collection(p)

    # rescale the axes manually (only possible when nodal coordinates are known)
    if autoscale and coor is not None:
        # - get limits
        xlim = [np.min(coor[:, 0]), np.max(coor[:, 0])]
        ylim = [np.min(coor[:, 1]), np.max(coor[:, 1])]
        # - set limits +/- 10% extra margin
        axis.set_xlim([xlim[0] - .1 * (xlim[1] - xlim[0]), xlim[1] + .1 * (xlim[1] - xlim[0])])
        axis.set_ylim([ylim[0] - .1 * (ylim[1] - ylim[0]), ylim[1] + .1 * (ylim[1] - ylim[0])])

    return p
# ==================================================================================================
|
tdegeus/GooseMPL | GooseMPL/__init__.py | copy_style | python | def copy_style():
r'''
Write all goose-styles to the relevant matplotlib configuration directory.
'''
import os
import matplotlib
# style definitions
# -----------------
styles = {}
styles['goose.mplstyle'] = '''
figure.figsize : 8,6
font.weight : normal
font.size : 16
axes.labelsize : medium
axes.titlesize : medium
xtick.labelsize : small
ytick.labelsize : small
xtick.top : True
ytick.right : True
axes.facecolor : none
axes.prop_cycle : cycler('color',['k', 'r', 'g', 'b', 'y', 'c', 'm'])
legend.fontsize : medium
legend.fancybox : true
legend.columnspacing : 1.0
legend.handletextpad : 0.2
lines.linewidth : 2
image.cmap : afmhot
image.interpolation : nearest
image.origin : lower
savefig.facecolor : none
figure.autolayout : True
errorbar.capsize : 2
'''
styles['goose-tick-in.mplstyle'] = '''
xtick.direction : in
ytick.direction : in
'''
styles['goose-tick-lower.mplstyle'] = '''
xtick.top : False
ytick.right : False
axes.spines.top : False
axes.spines.right : False
'''
if find_latex_font_serif() is not None:
styles['goose-latex.mplstyle'] = r'''
font.family : serif
font.serif : {serif:s}
font.weight : bold
font.size : 18
text.usetex : true
text.latex.preamble : \usepackage{{amsmath}},\usepackage{{amsfonts}},\usepackage{{amssymb}},\usepackage{{bm}}
'''.format(serif=find_latex_font_serif())
else:
styles['goose-latex.mplstyle'] = r'''
font.family : serif
font.weight : bold
font.size : 18
text.usetex : true
text.latex.preamble : \usepackage{{amsmath}},\usepackage{{amsfonts}},\usepackage{{amssymb}},\usepackage{{bm}}
'''
# write style definitions
# -----------------------
# directory name where the styles are stored
dirname = os.path.abspath(os.path.join(matplotlib.get_configdir(), 'stylelib'))
# make directory if it does not yet exist
if not os.path.isdir(dirname): os.makedirs(dirname)
# write all styles
for fname, style in styles.items():
open(os.path.join(dirname, fname),'w').write(style) | r'''
Write all goose-styles to the relevant matplotlib configuration directory. | train | https://github.com/tdegeus/GooseMPL/blob/16e1e06cbcf7131ac98c03ca7251ce83734ef905/GooseMPL/__init__.py#L55-L137 | [
"def find_latex_font_serif():\n r'''\nFind an available font to mimic LaTeX, and return its name.\n '''\n\n import os, re\n import matplotlib.font_manager\n\n name = lambda font: os.path.splitext(os.path.split(font)[-1])[0].split(' - ')[0]\n\n fonts = matplotlib.font_manager.findSystemFonts(fontpaths=None, fontext='ttf')\n\n matches = [\n r'.*Computer\\ Modern\\ Roman.*',\n r'.*CMU\\ Serif.*',\n r'.*CMU.*',\n r'.*Times.*',\n r'.*DejaVu.*',\n r'.*Serif.*',\n ]\n\n for match in matches:\n for font in fonts:\n if re.match(match,font):\n return name(font)\n\n return None\n"
] | '''
This module provides some extensions to matplotlib.
:dependencies:
* numpy
* matplotlib
:copyright:
| Tom de Geus
| tom@geus.me
| http://www.geus.me
'''
# ==================================================================================================
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
import os,re,sys
# ==================================================================================================
def find_latex_font_serif():
    r'''
    Find an available system font that mimics LaTeX's serif font, and return its name
    (or ``None`` when no candidate is installed).
    '''

    import os, re
    import matplotlib.font_manager

    # strip path, extension, and any " - variant" suffix from a font file name
    def fontname(path):
        return os.path.splitext(os.path.split(path)[-1])[0].split(' - ')[0]

    available = matplotlib.font_manager.findSystemFonts(fontpaths=None, fontext='ttf')

    # candidate patterns, in order of preference
    preference = [
        r'.*Computer\ Modern\ Roman.*',
        r'.*CMU\ Serif.*',
        r'.*CMU.*',
        r'.*Times.*',
        r'.*DejaVu.*',
        r'.*Serif.*',
    ]

    for pattern in preference:
        for path in available:
            if re.match(pattern, path):
                return fontname(path)

    return None
# --------------------------------------------------------------------------------------------------
# ==================================================================================================
def set_decade_lims(axis=None, direction=None):
    r'''
    Round the axis limits outward to whole decades.

    :options:

        **axis** ([``plt.gca()``] | ...)
            Specify the axis to which to apply the limits.

        **direction** ([``None``] | ``'x'`` | ``'y'``)
            Limit the application to a certain direction (default: both).
    '''

    if axis is None:
        axis = plt.gca()

    # x-axis: floor/ceil the limits to full decades
    if direction in (None, 'x'):
        lo, hi = axis.get_xlim()
        axis.set_xlim([10 ** np.floor(np.log10(lo)), 10 ** np.ceil(np.log10(hi))])

    # y-axis: floor/ceil the limits to full decades
    if direction in (None, 'y'):
        lo, hi = axis.get_ylim()
        axis.set_ylim([10 ** np.floor(np.log10(lo)), 10 ** np.ceil(np.log10(hi))])
# ==================================================================================================
def scale_lim(lim, factor=1.05):
    r'''
    Scale limits to be 5% wider, to have a nice plot.

    :arguments:

        **lim** (``<list>`` | ``<str>``)
            The limits. May be a string "[...,...]", which is converted to a list.

    :options:

        **factor** ([``1.05``] | ``<float>``)
            Scale factor.

    :returns:
        The widened limits (the input list is modified in place).
    '''

    import ast

    # SECURITY FIX: convert a string "[...,...]" with 'ast.literal_eval'
    # instead of 'eval'; only Python literals are accepted
    if isinstance(lim, str):
        lim = ast.literal_eval(lim)

    # widen symmetrically: (factor - 1) of the span, half on each side
    span = lim[1] - lim[0]

    lim[0] -= (factor - 1.) / 2. * span
    lim[1] += (factor - 1.) / 2. * span

    return lim
# ==================================================================================================
def abs2rel_x(x, axis=None):
    r'''
    Transform absolute x-coordinates to relative x-coordinates. Relative coordinates
    correspond to a fraction of the relevant axis. Be sure to set the limits and scale
    before calling this function!

    :arguments:

        **x** (``float``, ``list``)
            Absolute coordinates (``None`` entries are passed through).

    :options:

        **axis** ([``plt.gca()``] | ...)
            Specify the axis to read the limits/scale from.

    :returns:

        **x** (``float``, ``list``)
            Relative coordinates.
    '''

    if axis is None:
        axis = plt.gca()

    xmin, xmax = axis.get_xlim()

    # build the scalar transform once, depending on the axis scale
    if axis.get_xscale() == 'log':
        convert = lambda v: (np.log10(v) - np.log10(xmin)) / (np.log10(xmax) - np.log10(xmin))
    else:
        convert = lambda v: (v - xmin) / (xmax - xmin)

    # list input -> element-wise; scalar input -> the iteration fails and the
    # bare transform is applied (mirrors the original try/except behaviour)
    try:
        return [convert(i) if i is not None else i for i in x]
    except:
        return convert(x)
# ==================================================================================================
def abs2rel_y(y, axis=None):
    r'''
    Transform absolute y-coordinates to relative y-coordinates. Relative coordinates
    correspond to a fraction of the relevant axis. Be sure to set the limits and scale
    before calling this function!

    :arguments:

        **y** (``float``, ``list``)
            Absolute coordinates (``None`` entries are passed through).

    :options:

        **axis** ([``plt.gca()``] | ...)
            Specify the axis to read the limits/scale from.

    :returns:

        **y** (``float``, ``list``)
            Relative coordinates.
    '''

    # get current axis
    if axis is None:
        axis = plt.gca()

    # get current limits
    ymin, ymax = axis.get_ylim()

    # transform
    # BUG FIX: this used to test 'axis.get_xscale()'; the y-transform must
    # follow the scale of the *y*-axis
    # - log scale
    if axis.get_yscale() == 'log':
        try   : return [(np.log10(i) - np.log10(ymin)) / (np.log10(ymax) - np.log10(ymin)) if i is not None else i for i in y]
        except: return (np.log10(y) - np.log10(ymin)) / (np.log10(ymax) - np.log10(ymin))
    # - normal scale
    else:
        try   : return [(i - ymin) / (ymax - ymin) if i is not None else i for i in y]
        except: return (y - ymin) / (ymax - ymin)
# ==================================================================================================
def rel2abs_x(x, axis=None):
    r'''
    Transform relative x-coordinates to absolute x-coordinates. Relative coordinates
    correspond to a fraction of the relevant axis. Be sure to set the limits and scale
    before calling this function!

    :arguments:

        **x** (``float``, ``list``)
            Relative coordinates (``None`` entries are passed through).

    :options:

        **axis** ([``plt.gca()``] | ...)
            Specify the axis to read the limits/scale from.

    :returns:

        **x** (``float``, ``list``)
            Absolute coordinates.
    '''

    if axis is None:
        axis = plt.gca()

    xmin, xmax = axis.get_xlim()

    # build the scalar transform once, depending on the axis scale
    if axis.get_xscale() == 'log':
        convert = lambda v: 10. ** (np.log10(xmin) + v * (np.log10(xmax) - np.log10(xmin)))
    else:
        convert = lambda v: xmin + v * (xmax - xmin)

    # list input -> element-wise; scalar input -> the iteration fails and the
    # bare transform is applied (mirrors the original try/except behaviour)
    try:
        return [convert(i) if i is not None else i for i in x]
    except:
        return convert(x)
# ==================================================================================================
def rel2abs_y(y, axis=None):
    r'''
    Transform relative y-coordinates to absolute y-coordinates. Relative coordinates
    correspond to a fraction of the relevant axis. Be sure to set the limits and scale
    before calling this function!

    :arguments:

        **y** (``float``, ``list``)
            Relative coordinates (``None`` entries are passed through).

    :options:

        **axis** ([``plt.gca()``] | ...)
            Specify the axis to read the limits/scale from.

    :returns:

        **y** (``float``, ``list``)
            Absolute coordinates.
    '''

    # get current axis
    if axis is None:
        axis = plt.gca()

    # get current limits
    ymin, ymax = axis.get_ylim()

    # transform
    # BUG FIX: this used to test 'axis.get_xscale()'; the y-transform must
    # follow the scale of the *y*-axis
    # - log scale
    if axis.get_yscale() == 'log':
        try   : return [10. ** (np.log10(ymin) + i * (np.log10(ymax) - np.log10(ymin))) if i is not None else i for i in y]
        except: return 10. ** (np.log10(ymin) + y * (np.log10(ymax) - np.log10(ymin)))
    # - normal scale
    else:
        try   : return [ymin + i * (ymax - ymin) if i is not None else i for i in y]
        except: return ymin + y * (ymax - ymin)
# ==================================================================================================
def subplots(scale_x=None, scale_y=None, scale=None, **kwargs):
    r'''
    Run ``matplotlib.pyplot.subplots`` with ``figsize`` set to the correct multiple of
    the default figure size.

    :additional options:

        **scale, scale_x, scale_y** (``<float>``)
            Scale the figure-size (uniformly, or along one of the dimensions).
    '''

    # an explicit figsize wins: defer entirely to matplotlib
    if 'figsize' in kwargs:
        return plt.subplots(**kwargs)

    # start from matplotlib's default figure size
    width, height = mpl.rcParams['figure.figsize']

    # apply the requested scaling
    if scale is not None:
        width, height = width * scale, height * scale
    if scale_x is not None:
        width = width * scale_x
    if scale_y is not None:
        height = height * scale_y

    # one default-sized panel per row and per column
    nrows = kwargs.pop('nrows', 1)
    ncols = kwargs.pop('ncols', 1)

    return plt.subplots(nrows=nrows, ncols=ncols, figsize=(ncols * width, nrows * height), **kwargs)
# ==================================================================================================
def plot(x, y, units='absolute', axis=None, **kwargs):
    r'''
    Plot, optionally interpreting the coordinates as axis fractions.

    :arguments:

        **x, y** (``list``)
            Coordinates.

    :options:

        **units** ([``'absolute'``] | ``'relative'``)
            The type of units in which the coordinates are specified. Relative coordinates
            correspond to a fraction of the relevant axis; set limits/scale beforehand.

        ...
            Any ``plt.plot(...)`` option.

    :returns:
        The handle of the ``plt.plot(...)`` command.
    '''

    target = axis if axis is not None else plt.gca()

    # relative coordinates -> absolute data coordinates
    relative = units.lower() == 'relative'
    xdata = rel2abs_x(x, target) if relative else x
    ydata = rel2abs_y(y, target) if relative else y

    return target.plot(xdata, ydata, **kwargs)
# ==================================================================================================
def text(x, y, text, units='absolute', axis=None, **kwargs):
    r'''
    Plot a text.

    :arguments:

        **x, y** (``float``)
            Coordinates.

        **text** (``str``)
            Text to plot.

    :options:

        **units** ([``'absolute'``] | ``'relative'``)
            The type of units in which the coordinates are specified. Relative coordinates
            correspond to a fraction of the relevant axis; set limits/scale beforehand.

        ...
            Any ``plt.text(...)`` option.

    :returns:
        The handle of the ``plt.text(...)`` command.
    '''

    target = axis if axis is not None else plt.gca()

    # relative coordinates -> absolute data coordinates
    if units.lower() == 'relative':
        x = rel2abs_x(x, target)
        y = rel2abs_y(y, target)

    return target.text(x, y, text, **kwargs)
# ==================================================================================================
def diagonal_powerlaw(exp, ll=None, lr=None, tl=None, tr=None, width=None, height=None, plot=False, **kwargs):
    r'''
    Set the limits such that a power-law with a certain exponent lies on the diagonal.

    :arguments:

        **exp** (``<float>``)
            The power-law exponent.

        **ll, lr, tl, tr** (``<list>``)
            Coordinates of the lower-left, or the lower-right, or the top-left, or the
            top-right corner (specify exactly one).

        **width, height** (``<float>``)
            Width or the height (specify exactly one).

    :options:

        **axis** ([``plt.gca()``] | ...)
            Specify the axis to which to apply the limits.

        **plot** ([``False``] | ``True``)
            Plot the diagonal.

        ...
            Any ``plt.plot(...)`` option.

    :returns:
        The handle of the ``plt.plot(...)`` command (if any).
    '''

    axis = kwargs.pop('axis', None)

    # BUG FIX: 'plt.gca()' used to be the eagerly-evaluated 'pop' default,
    # which creates a new figure even when an axis is supplied; resolve lazily.
    if axis is None:
        axis = plt.gca()

    # work in log-space: one of width/height defines the extent in decades
    if   width  and not height: width  = np.log(width )
    elif height and not width : height = np.log(height)
    else: raise IOError('Specify "width" or "height"')

    # exactly one corner may be given; convert it to log-space
    if   ll and not lr and not tl and not tr: ll = [np.log(ll[0]), np.log(ll[1])]
    elif lr and not ll and not tl and not tr: lr = [np.log(lr[0]), np.log(lr[1])]
    elif tl and not lr and not ll and not tr: tl = [np.log(tl[0]), np.log(tl[1])]
    elif tr and not lr and not tl and not ll: tr = [np.log(tr[0]), np.log(tr[1])]
    else: raise IOError('Specify "ll" or "lr" or "tl" or "tr"')

    axis.set_xscale('log')
    axis.set_yscale('log')

    # the aspect that puts slope 'exp' on the diagonal fixes the other extent
    if   width : height = width  * np.abs(exp)
    elif height: width  = height / np.abs(exp)

    # set limits, growing away from the given corner
    if ll:
        axis.set_xlim(sorted([np.exp(ll[0]), np.exp(ll[0] + width )]))
        axis.set_ylim(sorted([np.exp(ll[1]), np.exp(ll[1] + height)]))
    elif lr:
        axis.set_xlim(sorted([np.exp(lr[0]), np.exp(lr[0] - width )]))
        axis.set_ylim(sorted([np.exp(lr[1]), np.exp(lr[1] + height)]))
    elif tl:
        axis.set_xlim(sorted([np.exp(tl[0]), np.exp(tl[0] + width )]))
        axis.set_ylim(sorted([np.exp(tl[1]), np.exp(tl[1] - height)]))
    elif tr:
        axis.set_xlim(sorted([np.exp(tr[0]), np.exp(tr[0] - width )]))
        axis.set_ylim(sorted([np.exp(tr[1]), np.exp(tr[1] - height)]))

    # optionally draw the diagonal itself (in relative coordinates)
    if plot:
        if exp > 0: return plot_powerlaw(exp, 0., 0., 1., **kwargs)
        else      : return plot_powerlaw(exp, 0., 1., 1., **kwargs)
# ==================================================================================================
def annotate_powerlaw(text, exp, startx, starty, width=None, rx=0.5, ry=0.5, **kwargs):
    r'''
    Add a label to the middle of a power-law annotation (see ``goosempl.plot_powerlaw``).

    :arguments:

        **text** (``str``)
            The label text.

        **exp** (``float``)
            The power-law exponent.

        **startx, starty** (``float``)
            Start coordinates.

    :options:

        **width, height, endx, endy** (``float``)
            Definition of the end coordinate (only one of these options is needed).

        **rx, ry** (``float``)
            Shift in x- and y-direction w.r.t. the default coordinates.

        **units** ([``'relative'``] | ``'absolute'``)
            The type of units in which the coordinates are specified. Relative coordinates
            correspond to a fraction of the relevant axis; set limits/scale beforehand.

        **axis** ([``plt.gca()``] | ...)
            Specify the axis to which to apply the limits.

        ...
            Any ``plt.text(...)`` option.

    :returns:
        The handle of the ``plt.text(...)`` command.
    '''

    # get options/defaults
    endx   = kwargs.pop('endx'  , None      )
    endy   = kwargs.pop('endy'  , None      )
    height = kwargs.pop('height', None      )
    units  = kwargs.pop('units' , 'relative')
    axis   = kwargs.pop('axis'  , None      )

    # BUG FIX: 'plt.gca()' used to be the eagerly-evaluated 'pop' default,
    # which creates a new figure even when an axis is supplied; resolve lazily.
    if axis is None:
        axis = plt.gca()

    # check: a power-law is only a straight line on a log-log plot
    if axis.get_xscale() != 'log' or axis.get_yscale() != 'log':
        raise IOError('This function only works on a log-log scale, where the power-law is a straight line')

    # apply width/height to fix the end coordinate
    if width is not None:
        endx = startx + width
        endy = None
    elif height is not None:
        if   exp >  0: endy = starty + height
        elif exp == 0: endy = starty
        else         : endy = starty - height
        endx = None

    # transform relative -> absolute coordinates
    if units.lower() == 'relative':
        [startx, endx] = rel2abs_x([startx, endx], axis)
        [starty, endy] = rel2abs_y([starty, endy], axis)

    # determine multiplication constant: y = const * x ** exp
    const = starty / (startx ** exp)

    # get the missing end coordinate
    if endx is not None: endy = const * endx ** exp
    else               : endx = (endy / const) ** (1 / exp)

    # position the label along the logarithmic diagonal (rx, ry = 0.5 -> middle)
    x = 10. ** (np.log10(startx) + rx * (np.log10(endx) - np.log10(startx)))
    y = 10. ** (np.log10(starty) + ry * (np.log10(endy) - np.log10(starty)))

    # plot
    return axis.text(x, y, text, **kwargs)
# ==================================================================================================
def plot_powerlaw(exp, startx, starty, width=None, **kwargs):
    r'''
    Plot a power-law: a straight line with slope ``exp`` on a log-log axis.

    :arguments:

        **exp** (``float``)
            The power-law exponent.

        **startx, starty** (``float``)
            Start coordinates.

    :options:

        **width, height, endx, endy** (``float``)
            Definition of the end coordinate (only one of these options is needed).

        **units** ([``'relative'``] | ``'absolute'``)
            The type of units in which the coordinates are specified. Relative coordinates
            correspond to a fraction of the relevant axis. If you use relative coordinates,
            be sure to set the limits and scale before calling this function!

        **axis** ([``plt.gca()``] | ...)
            Specify the axis to which to apply the limits.

        ...
            Any ``plt.plot(...)`` option.

    :returns:
        The handle of the ``plt.plot(...)`` command.
    '''

    # get options/defaults
    endx   = kwargs.pop('endx'  , None      )
    endy   = kwargs.pop('endy'  , None      )
    height = kwargs.pop('height', None      )
    units  = kwargs.pop('units' , 'relative')
    axis   = kwargs.pop('axis'  , None      )

    # BUG FIX: 'plt.gca()' used to be the eagerly-evaluated 'pop' default,
    # which creates a new figure even when an axis is supplied; resolve lazily.
    if axis is None:
        axis = plt.gca()

    # check: a power-law is only a straight line on a log-log plot
    if axis.get_xscale() != 'log' or axis.get_yscale() != 'log':
        raise IOError('This function only works on a log-log scale, where the power-law is a straight line')

    # apply width/height to fix the end coordinate
    if width is not None:
        endx = startx + width
        endy = None
    elif height is not None:
        if   exp >  0: endy = starty + height
        elif exp == 0: endy = starty
        else         : endy = starty - height
        endx = None

    # transform relative -> absolute coordinates
    if units.lower() == 'relative':
        [startx, endx] = rel2abs_x([startx, endx], axis)
        [starty, endy] = rel2abs_y([starty, endy], axis)

    # determine multiplication constant: y = const * x ** exp
    const = starty / (startx ** exp)

    # get the missing end coordinate
    if endx is not None: endy = const * endx ** exp
    else               : endx = (endy / const) ** (1 / exp)

    # plot
    return axis.plot([startx, endx], [starty, endy], **kwargs)
# ==================================================================================================
def grid_powerlaw(exp, insert=0, skip=0, end=-1, step=0, axis=None, **kwargs):
    r'''
    Draw a power-law grid: a grid that respects a certain power-law exponent. The grid-lines start from
    the positions of the ticks.
    :arguments:
    **exp** (``float``)
    The power-law exponent.
    :options:
    **insert** (``<int>``)
    Insert extra lines in between the default lines set by the tick positions.
    **skip, end, step** (``<int>``)
    Select from the lines based on ``coor = coor[skip:end:step]``.
    **axis** ([``plt.gca()``] | ...)
    Specify the axis to which to apply the limits.
    ...
    Any ``plt.plot(...)`` option.
    :returns:
    The handle of the ``plt.plot(...)`` command.
    '''
    # default axis
    if axis is None: axis = plt.gca()
    # default plot settings
    kwargs.setdefault('color' , 'k' )
    kwargs.setdefault('linestyle', '--')
    kwargs.setdefault('linewidth', 1 )
    # check: a power-law is a straight line only on a log-log scale
    if axis.get_xscale() != 'log' or axis.get_yscale() != 'log':
        raise IOError('This function only works on a log-log scale, where the power-law is a straight line')
    # zero-exponent: draw horizontal lines
    if exp == 0:
        # y-coordinate of the start positions (relative, i.e. fraction of the axis)
        starty = abs2rel_y(axis.get_yticks(), axis=axis)
        # insert extra coordinates by linear interpolation between the ticks
        if insert > 0:
            n = len(starty)
            x = np.linspace(0,1,n+(n-1)*int(insert))
            xp = np.linspace(0,1,n)
            starty = np.interp(x, xp, starty)
        # skip coordinates
        starty = starty[int(skip):int(end):int(1+step)]
        # set remaining coordinates: lines span the full x-range (relative 0..1)
        endy = starty
        startx = np.zeros((len(starty)))
        endx = np.ones ((len(starty)))
    # all other exponents
    else:
        # get the axis' size in real coordinates
        # - get the limits
        xmin, xmax = axis.get_xlim()
        ymin, ymax = axis.get_ylim()
        # - compute the size in both directions (in decades)
        deltax = np.log10(xmax) - np.log10(xmin)
        deltay = np.log10(ymax) - np.log10(ymin)
        # convert the exponent in real coordinates to an exponent in relative coordinates
        b = np.abs(exp) * deltax / deltay
        # x-coordinate of the start positions (relative)
        startx = abs2rel_x(axis.get_xticks(), axis=axis)
        # compute how many labels need to be prepended so lines cover the lower corner
        Dx = startx[1] - startx[0]
        nneg = int(np.floor(1./(b*Dx))) - 1
        # add extra to be sure
        if insert > 0:
            nneg += 1
        # prepend start positions left of the axis
        if nneg > 0:
            startx = np.hstack(( startx[0]+np.cumsum(-Dx * np.ones((nneg)))[::-1], startx ))
        # insert extra coordinates by linear interpolation
        if insert > 0:
            n = len(startx)
            x = np.linspace(0,1,n+(n-1)*int(insert))
            xp = np.linspace(0,1,n)
            startx = np.interp(x, xp, startx)
        # skip coordinates
        if step > 0:
            startx = startx[int(skip)::int(1+step)]
        # x-coordinate of the end of the lines (slope 1/b in relative coordinates)
        endx = startx + 1/b
        # y-coordinate of the start and the end of the lines: full height, direction by sign
        if exp > 0:
            starty = np.zeros((len(startx)))
            endy = np.ones ((len(startx)))
        else:
            starty = np.ones ((len(startx)))
            endy = np.zeros((len(startx)))
    # convert to real coordinates
    startx = rel2abs_x(startx, axis)
    endx = rel2abs_x(endx , axis)
    starty = rel2abs_y(starty, axis)
    endy = rel2abs_y(endy , axis)
    # plot: each column of the stacked arrays is one grid line
    lines = axis.plot(np.vstack(( startx, endx )), np.vstack(( starty, endy )), **kwargs)
    # remove excess labels: keep only one legend entry for the whole grid
    plt.setp(lines[1:], label="_")
    # return handles
    return lines
# ==================================================================================================
def histogram_bin_edges_minwidth(min_width, bins):
    r'''
    Merge bins with right-neighbour until each bin has a minimum width.
    :arguments:
    **bins** (``<array_like>``)
    The bin-edges.
    **min_width** (``<float>``)
    The minimum bin width.
    '''
    # no constraint given: nothing to do
    if min_width is None or min_width is False:
        return bins
    # repeatedly merge the first too-narrow bin with its right neighbour
    while True:
        narrow = np.where(np.diff(bins) < min_width)[0]
        if len(narrow) == 0:
            return bins
        first = narrow[0]
        if first + 1 == len(bins) - 1:
            # last bin: drop its left edge, keep the outer bound
            bins = np.hstack((bins[:first], bins[-1]))
        else:
            # interior bin: remove the edge that separates it from its neighbour
            bins = np.hstack((bins[:(first + 1)], bins[(first + 2):]))
# ==================================================================================================
def histogram_bin_edges_mincount(data, min_count, bins):
    r'''
    Merge bins with right-neighbour until each bin has a minimum number of data-points.
    :arguments:
    **data** (``<array_like>``)
    Input data. The histogram is computed over the flattened array.
    **bins** (``<array_like>`` | ``<int>``)
    The bin-edges (or the number of bins, automatically converted to equal-sized bins).
    **min_count** (``<int>``)
    The minimum number of data-points per bin.
    '''
    # no constraint given: nothing to do
    if min_count is None or min_count is False:
        return bins
    # check
    if type(min_count) != int:
        raise IOError('"min_count" must be an integer number')
    # repeatedly merge the first too-sparse bin with its right neighbour
    while True:
        counts, _ = np.histogram(data, bins=bins, density=False)
        sparse = np.where(counts < min_count)[0]
        if len(sparse) == 0:
            return bins
        first = sparse[0]
        if first + 1 == len(counts):
            # last bin: drop its left edge, keep the outer bound
            bins = np.hstack((bins[:first], bins[-1]))
        else:
            # interior bin: remove the edge that separates it from its neighbour
            bins = np.hstack((bins[:(first + 1)], bins[(first + 2):]))
# ==================================================================================================
def histogram_bin_edges(data, bins=10, mode='equal', min_count=None, integer=False, remove_empty_edges=True, min_width=None):
    r'''
    Determine bin-edges.
    :arguments:
    **data** (``<array_like>``)
    Input data. The histogram is computed over the flattened array.
    :options:
    **bins** ([``10``] | ``<int>``)
    The number of bins.
    **mode** ([``'equal'``] | ``<str>``)
    Mode with which to compute the bin-edges:
    * ``'equal'``: each bin has equal width.
    * ``'log'``: logarithmic spacing.
    * ``'uniform'``: uniform number of data-points per bin.
    **min_count** (``<int>``)
    The minimum number of data-points per bin.
    **min_width** (``<float>``)
    The minimum width of each bin.
    **integer** ([``False``] | [``True``])
    If ``True``, bins not encompassing an integer are removed
    (e.g. a bin with edges ``[1.1, 1.9]`` is removed, but ``[0.9, 1.1]`` is not removed).
    **remove_empty_edges** ([``True``] | [``False``])
    Remove empty bins at the beginning or the end.
    :returns:
    **bin_edges** (``<array of dtype float>``)
    The edges to pass into histogram.
    '''
    # determine the bin-edges
    if mode == 'equal':
        bin_edges = np.linspace(np.min(data), np.max(data), bins+1)
    elif mode == 'log':
        bin_edges = np.logspace(np.log10(np.min(data)), np.log10(np.max(data)), bins+1)
    elif mode == 'uniform':
        # - check
        if hasattr(bins, "__len__"):
            raise IOError('Only the number of bins can be specified')
        # - use the minimum count to estimate the number of bins
        if min_count is not None and min_count is not False:
            if type(min_count) != int: raise IOError('"min_count" must be an integer number')
            bins = int(np.floor(float(len(data))/float(min_count)))
        # - number of data-points in each bin (equal for each)
        count = int(np.floor(float(len(data))/float(bins))) * np.ones(bins, dtype='int')
        # - increase the number of data-points by one in as many bins as needed,
        #   such that the total fits the total number of data-points
        #   (bug fix: "np.int" was removed from NumPy >= 1.24, use the builtin "int")
        count[np.linspace(0, bins-1, len(data)-np.sum(count)).astype(int)] += 1
        # - split the data
        idx = np.empty((bins+1), dtype='int')
        idx[0 ] = 0
        idx[1:] = np.cumsum(count)
        idx[-1] = len(data) - 1
        # - determine the bin-edges
        bin_edges = np.unique(np.sort(data)[idx])
    else:
        raise IOError('Unknown option')
    # remove empty starting and ending bin (related to an unfortunate choice of bin-edges)
    if remove_empty_edges:
        N, _ = np.histogram(data, bins=bin_edges, density=False)
        idx = np.min(np.where(N > 0)[0])
        jdx = np.max(np.where(N > 0)[0])
        bin_edges = bin_edges[(idx):(jdx+2)]
    # merge bins with too few data-points (if needed); only call the helper when a
    # constraint is actually given (the helper is a no-op otherwise)
    if min_count is not None and min_count is not False:
        bin_edges = histogram_bin_edges_mincount(data, min_count=min_count, bins=bin_edges)
    # merge bins that have too small of a width (same guard as above)
    if min_width is not None and min_width is not False:
        bin_edges = histogram_bin_edges_minwidth(min_width=min_width, bins=bin_edges)
    # select only bins that encompass an integer (and retain the original bounds)
    if integer:
        idx = np.where(np.diff(np.floor(bin_edges)) >= 1)[0]
        idx = np.unique(np.hstack((0, idx, len(bin_edges)-1)))
        bin_edges = bin_edges[idx]
    # return
    return bin_edges
# ==================================================================================================
def histogram(data, return_edges=True, **kwargs):
    r'''
    Compute histogram.
    See `numpy.histogram <https://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html>`_
    :extra options:
    **return_edges** ([``True``] | [``False``])
    Return the bin edges if set to ``True``, return their midpoints otherwise.
    '''
    # delegate the actual counting to NumPy
    counts, edges = np.histogram(data, **kwargs)
    if not return_edges:
        # replace the edges by the midpoint of each bin
        midpoints = edges[:-1] + 0.5 * np.diff(edges)
        return counts, midpoints
    return counts, edges
# ==================================================================================================
def histogram_cumulative(data, **kwargs):
    r'''
    Compute cumulative histogram.
    See `numpy.histogram <https://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html>`_
    :extra options:
    **return_edges** ([``True``] | [``False``])
    Return the bin edges if set to ``True``, return their midpoints otherwise.
    **normalize** ([``False``] | ``True``)
    Normalize such that the final probability is one. In this case the function returns the (binned)
    cumulative probability density.
    '''
    # pop the options that numpy.histogram does not understand
    return_edges = kwargs.pop('return_edges', True)
    normalize = kwargs.pop('normalize', False)
    # count, then accumulate
    counts, edges = np.histogram(data, **kwargs)
    cumulative = np.cumsum(counts)
    # optionally scale such that the last value equals one
    if normalize:
        cumulative = cumulative / cumulative[-1]
    # optionally convert edges to bin midpoints
    if not return_edges:
        edges = edges[:-1] + 0.5 * np.diff(edges)
    return cumulative, edges
# ==================================================================================================
def hist(P, edges, **kwargs):
    r'''
    Plot a histogram as a collection of rectangular patches.
    :arguments:
    **P** (``<array_like>``)
    Count (or density) per bin.
    **edges** (``<array_like>``)
    The bin-edges (one more entry than ``P``).
    :options:
    **axis** ([``plt.gca()``] | ...), **cindex** (``<array_like>``), **autoscale** ([``True``] | ``False``)
    ...
    Any ``PatchCollection`` option.
    :returns:
    The ``PatchCollection`` handle.
    '''
    from matplotlib.collections import PatchCollection
    from matplotlib.patches import Polygon
    # extract local options
    axis = kwargs.pop( 'axis' , plt.gca() )
    cindex = kwargs.pop( 'cindex' , None )
    autoscale = kwargs.pop( 'autoscale' , True )
    # set defaults
    kwargs.setdefault('edgecolor','k')
    # no color-index -> set transparent
    if cindex is None:
        kwargs.setdefault('facecolor',(0.,0.,0.,0.))
    # convert -> list of Polygons: one rectangle from the baseline up to P per bin
    poly = []
    for p, xl, xu in zip(P, edges[:-1], edges[1:]):
        coor = np.array([
            [xl, 0.],
            [xu, 0.],
            [xu, p ],
            [xl, p ],
        ])
        poly.append(Polygon(coor))
    # NOTE(review): "(poly)" is just the list, not a tuple; PatchCollection accepts a
    # list of patches, so this works as intended
    args = (poly)
    # convert patches -> matplotlib-objects
    p = PatchCollection(args,**kwargs)
    # add colors to patches
    if cindex is not None:
        p.set_array(cindex)
    # add patches to axis
    axis.add_collection(p)
    # rescale the axes manually (Collections do not trigger autoscaling)
    if autoscale:
        # - get limits
        xlim = [ edges[0], edges[-1] ]
        ylim = [ 0 , np.max(P) ]
        # - set limits +/- 10% extra margin
        axis.set_xlim([xlim[0]-.1*(xlim[1]-xlim[0]),xlim[1]+.1*(xlim[1]-xlim[0])])
        axis.set_ylim([ylim[0]-.1*(ylim[1]-ylim[0]),ylim[1]+.1*(ylim[1]-ylim[0])])
    return p
# ==================================================================================================
def cdf(data, mode='continuous', **kwargs):
    '''
    Return cumulative density.
    :arguments:
    **data** (``<numpy.ndarray>``)
    Input data, to plot the distribution for.
    :returns:
    **P** (``<numpy.ndarray>``)
    Cumulative probability.
    **x** (``<numpy.ndarray>``)
    Data points.
    '''
    # NOTE: "mode" and any extra keyword arguments are currently unused
    probabilities = np.linspace(0.0, 1.0, len(data))
    values = np.sort(data)
    return (probabilities, values)
# ==================================================================================================
def patch(*args, **kwargs):
    '''
    Add patches to plot. The color of the patches is indexed according to a specified color-index.
    :example:
    Plot a finite element mesh: the outline of the undeformed configuration, and the deformed
    configuration for which the elements get a color e.g. based on stress::
    import matplotlib.pyplot as plt
    import goosempl as gplt
    fig,ax = plt.subplots()
    p = gplt.patch(coor=coor+disp,conn=conn,axis=ax,cindex=stress,cmap='YlOrRd',edgecolor=None)
    _ = gplt.patch(coor=coor ,conn=conn,axis=ax)
    cbar = fig.colorbar(p,axis=ax,aspect=10)
    plt.show()
    :arguments - option 1/2:
    **patches** (``<list>``)
    List with patch objects. Can be replaced by specifying ``coor`` and ``conn``.
    :arguments - option 2/2:
    **coor** (``<numpy.ndarray>`` | ``<list>`` (nested))
    Matrix with on each row the coordinates (positions) of each node.
    **conn** (``<numpy.ndarray>`` | ``<list>`` (nested))
    Matrix with on each row the number numbers (rows in ``coor``) which form an element (patch).
    :options:
    **cindex** (``<numpy.ndarray>``)
    Array with, for each patch, the value that should be indexed to a color.
    **axis** (``<matplotlib>``)
    Specify an axis to include to plot in. By default the current axis is used.
    **autoscale** ([``True``] | ``False``)
    Automatically update the limits of the plot (currently automatic limits of Collections are not
    supported by matplotlib).
    :recommended options:
    **cmap** (``<str>`` | ...)
    Specify a colormap.
    **linewidth** (``<float>``)
    Width of the edges.
    **edgecolor** (``<str>`` | ...)
    Color of the edges.
    **clim** (``(<float>,<float>)``)
    Lower and upper limit of the color-axis.
    :returns:
    **handle** (``<matplotlib>``)
    Handle of the patch objects.
    .. seealso::
    * `matplotlib example
    <http://matplotlib.org/examples/api/patch_collection.html>`_.
    '''
    from matplotlib.collections import PatchCollection
    from matplotlib.patches import Polygon
    # check dependent options
    if ('coor' not in kwargs or 'conn' not in kwargs):
        raise IOError('Specify both "coor" and "conn"')
    # extract local options
    axis = kwargs.pop('axis', plt.gca())
    cindex = kwargs.pop('cindex', None)
    coor = kwargs.pop('coor', None)
    conn = kwargs.pop('conn', None)
    autoscale = kwargs.pop('autoscale', True)
    # set defaults
    kwargs.setdefault('edgecolor', 'k')
    # no color-index -> set transparent
    if cindex is None:
        kwargs.setdefault('facecolor', (0., 0., 0., 0.))
    # convert mesh -> list of Polygons (one per row of "conn")
    if coor is not None and conn is not None:
        poly = []
        for iconn in conn:
            poly.append(Polygon(coor[iconn, :]))
        # bug fix: "tuple(poly, *args)" raised a TypeError whenever extra positional
        # patches were passed (tuple() takes at most one argument); concatenate instead
        args = tuple(poly) + tuple(args)
    # convert patches -> matplotlib-objects
    p = PatchCollection(args, **kwargs)
    # add colors to patches
    if cindex is not None:
        p.set_array(cindex)
    # add patches to axis
    axis.add_collection(p)
    # rescale the axes manually (Collections do not trigger autoscaling)
    if autoscale:
        # - get limits
        xlim = [np.min(coor[:, 0]), np.max(coor[:, 0])]
        ylim = [np.min(coor[:, 1]), np.max(coor[:, 1])]
        # - set limits +/- 10% extra margin
        axis.set_xlim([xlim[0] - .1 * (xlim[1] - xlim[0]), xlim[1] + .1 * (xlim[1] - xlim[0])])
        axis.set_ylim([ylim[0] - .1 * (ylim[1] - ylim[0]), ylim[1] + .1 * (ylim[1] - ylim[0])])
    return p
# ==================================================================================================
|
tdegeus/GooseMPL | GooseMPL/__init__.py | set_decade_lims | python | def set_decade_lims(axis=None,direction=None):
r'''
Set limits the the floor/ceil values in terms of decades.
:options:
**axis** ([``plt.gca()``] | ...)
Specify the axis to which to apply the limits.
**direction** ([``None``] | ``'x'`` | ``'y'``)
Limit the application to a certain direction (default: both).
'''
# get current axis
if axis is None:
axis = plt.gca()
# x-axis
if direction is None or direction == 'x':
# - get current limits
MIN,MAX = axis.get_xlim()
# - floor/ceil to full decades
MIN = 10 ** ( np.floor(np.log10(MIN)) )
MAX = 10 ** ( np.ceil (np.log10(MAX)) )
# - apply
axis.set_xlim([MIN,MAX])
# y-axis
if direction is None or direction == 'y':
# - get current limits
MIN,MAX = axis.get_ylim()
# - floor/ceil to full decades
MIN = 10 ** ( np.floor(np.log10(MIN)) )
MAX = 10 ** ( np.ceil (np.log10(MAX)) )
# - apply
axis.set_ylim([MIN,MAX]) | r'''
Set limits the the floor/ceil values in terms of decades.
:options:
**axis** ([``plt.gca()``] | ...)
Specify the axis to which to apply the limits.
**direction** ([``None``] | ``'x'`` | ``'y'``)
Limit the application to a certain direction (default: both). | train | https://github.com/tdegeus/GooseMPL/blob/16e1e06cbcf7131ac98c03ca7251ce83734ef905/GooseMPL/__init__.py#L141-L176 | null | '''
This module provides some extensions to matplotlib.
:dependencies:
* numpy
* matplotlib
:copyright:
| Tom de Geus
| tom@geus.me
| http://www.geus.me
'''
# ==================================================================================================
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
import os,re,sys
# ==================================================================================================
def find_latex_font_serif():
    r'''
    Find an available font to mimic LaTeX, and return its name.
    '''
    import os, re
    import matplotlib.font_manager

    # strip directory, extension, and any " - " suffix from a font path
    def basename(path):
        return os.path.splitext(os.path.split(path)[-1])[0].split(' - ')[0]

    # preference order: first pattern that matches any installed font wins
    patterns = [
        r'.*Computer\ Modern\ Roman.*',
        r'.*CMU\ Serif.*',
        r'.*CMU.*',
        r'.*Times.*',
        r'.*DejaVu.*',
        r'.*Serif.*',
    ]
    available = matplotlib.font_manager.findSystemFonts(fontpaths=None, fontext='ttf')
    for pattern in patterns:
        for path in available:
            if re.match(pattern, path):
                return basename(path)
    # nothing suitable installed
    return None
# --------------------------------------------------------------------------------------------------
def copy_style():
    r'''
    Write all goose-styles to the relevant matplotlib configuration directory.
    Creates the ``stylelib`` directory if needed and (over)writes the
    ``goose*.mplstyle`` style files there.
    '''
    import os
    import matplotlib
    # style definitions
    # -----------------
    styles = {}
    styles['goose.mplstyle'] = '''
figure.figsize : 8,6
font.weight : normal
font.size : 16
axes.labelsize : medium
axes.titlesize : medium
xtick.labelsize : small
ytick.labelsize : small
xtick.top : True
ytick.right : True
axes.facecolor : none
axes.prop_cycle : cycler('color',['k', 'r', 'g', 'b', 'y', 'c', 'm'])
legend.fontsize : medium
legend.fancybox : true
legend.columnspacing : 1.0
legend.handletextpad : 0.2
lines.linewidth : 2
image.cmap : afmhot
image.interpolation : nearest
image.origin : lower
savefig.facecolor : none
figure.autolayout : True
errorbar.capsize : 2
'''
    styles['goose-tick-in.mplstyle'] = '''
xtick.direction : in
ytick.direction : in
'''
    styles['goose-tick-lower.mplstyle'] = '''
xtick.top : False
ytick.right : False
axes.spines.top : False
axes.spines.right : False
'''
    if find_latex_font_serif() is not None:
        # "{{...}}" escapes are resolved by .format -> single braces in the output file
        styles['goose-latex.mplstyle'] = r'''
font.family : serif
font.serif : {serif:s}
font.weight : bold
font.size : 18
text.usetex : true
text.latex.preamble : \usepackage{{amsmath}},\usepackage{{amsfonts}},\usepackage{{amssymb}},\usepackage{{bm}}
'''.format(serif=find_latex_font_serif())
    else:
        # bug fix: this branch is not passed through .format, so the braces must be
        # single here -- the old "{{amsmath}}" ended up literally in the style file,
        # producing an invalid LaTeX preamble
        styles['goose-latex.mplstyle'] = r'''
font.family : serif
font.weight : bold
font.size : 18
text.usetex : true
text.latex.preamble : \usepackage{amsmath},\usepackage{amsfonts},\usepackage{amssymb},\usepackage{bm}
'''
    # write style definitions
    # -----------------------
    # directory name where the styles are stored
    dirname = os.path.abspath(os.path.join(matplotlib.get_configdir(), 'stylelib'))
    # make directory if it does not yet exist
    if not os.path.isdir(dirname): os.makedirs(dirname)
    # write all styles
    for fname, style in styles.items():
        open(os.path.join(dirname, fname), 'w').write(style)
# ==================================================================================================
# ==================================================================================================
def scale_lim(lim, factor=1.05):
    r'''
    Scale limits to be 5% wider, to have a nice plot.
    :arguments:
    **lim** (``<list>`` | ``<str>``)
    The limits. May be a string "[...,...]", which is converted to a list.
    :options:
    **factor** ([``1.05``] | ``<float>``)
    Scale factor.
    :returns:
    The widened limits (the input list is modified in place).
    '''
    # convert string "[...,...]"; literal_eval parses the list without the code-execution
    # risk of the previous eval()
    if type(lim) == str:
        import ast
        lim = ast.literal_eval(lim)
    # widen symmetrically around the centre
    D = lim[1] - lim[0]
    lim[0] -= (factor - 1.) / 2. * D
    lim[1] += (factor - 1.) / 2. * D
    return lim
# ==================================================================================================
def abs2rel_x(x, axis=None):
    r'''
    Transform absolute x-coordinates to relative x-coordinates. Relative coordinates correspond to a
    fraction of the relevant axis. Be sure to set the limits and scale before calling this function!
    :arguments:
    **x** (``float``, ``list``)
    Absolute coordinates.
    :options:
    **axis** ([``plt.gca()``] | ...)
    Specify the axis to which to apply the limits.
    :returns:
    **x** (``float``, ``list``)
    Relative coordinates.
    '''
    # get current axis
    if axis is None:
        axis = plt.gca()
    # get current limits
    xmin, xmax = axis.get_xlim()
    # transform
    # - log scale: work on the decade (log10) positions
    if axis.get_xscale() == 'log':
        # NOTE(review): the bare "except" catches the TypeError raised when "x" is a
        # scalar (not iterable), falling back to the scalar formula; it also hides any
        # other error -- consider narrowing to "except TypeError"
        try : return [(np.log10(i)-np.log10(xmin))/(np.log10(xmax)-np.log10(xmin)) if i is not None else i for i in x]
        except: return (np.log10(x)-np.log10(xmin))/(np.log10(xmax)-np.log10(xmin))
    # - normal scale: plain linear interpolation
    else:
        try : return [(i-xmin)/(xmax-xmin) if i is not None else i for i in x]
        except: return (x-xmin)/(xmax-xmin)
# ==================================================================================================
def abs2rel_y(y, axis=None):
    r'''
    Transform absolute y-coordinates to relative y-coordinates. Relative coordinates correspond to a
    fraction of the relevant axis. Be sure to set the limits and scale before calling this function!
    :arguments:
    **y** (``float``, ``list``)
    Absolute coordinates.
    :options:
    **axis** ([``plt.gca()``] | ...)
    Specify the axis to which to apply the limits.
    :returns:
    **y** (``float``, ``list``)
    Relative coordinates.
    '''
    # get current axis
    if axis is None:
        axis = plt.gca()
    # get current limits
    ymin, ymax = axis.get_ylim()
    # transform
    # - log scale (bug fix: this used to check "get_xscale()", so a logarithmic y-axis
    #   combined with a linear x-axis was transformed linearly)
    if axis.get_yscale() == 'log':
        # the bare "except" falls back to the scalar formula when "y" is not iterable
        try : return [(np.log10(i)-np.log10(ymin))/(np.log10(ymax)-np.log10(ymin)) if i is not None else i for i in y]
        except: return (np.log10(y)-np.log10(ymin))/(np.log10(ymax)-np.log10(ymin))
    # - normal scale
    else:
        try : return [(i-ymin)/(ymax-ymin) if i is not None else i for i in y]
        except: return (y-ymin)/(ymax-ymin)
# ==================================================================================================
def rel2abs_x(x, axis=None):
    r'''
    Transform relative x-coordinates to absolute x-coordinates. Relative coordinates correspond to a
    fraction of the relevant axis. Be sure to set the limits and scale before calling this function!
    :arguments:
    **x** (``float``, ``list``)
    Relative coordinates.
    :options:
    **axis** ([``plt.gca()``] | ...)
    Specify the axis to which to apply the limits.
    :returns:
    **x** (``float``, ``list``)
    Absolute coordinates.
    '''
    # get current axis
    if axis is None:
        axis = plt.gca()
    # get current limits
    xmin, xmax = axis.get_xlim()
    # transform
    # - log scale: interpolate between the decade (log10) positions
    if axis.get_xscale() == 'log':
        # NOTE(review): the bare "except" catches the TypeError raised when "x" is a
        # scalar (not iterable), falling back to the scalar formula; it also hides any
        # other error -- consider narrowing to "except TypeError"
        try : return [10.**(np.log10(xmin)+i*(np.log10(xmax)-np.log10(xmin))) if i is not None else i for i in x]
        except: return 10.**(np.log10(xmin)+x*(np.log10(xmax)-np.log10(xmin)))
    # - normal scale: plain linear interpolation
    else:
        try : return [xmin+i*(xmax-xmin) if i is not None else i for i in x]
        except: return xmin+x*(xmax-xmin)
# ==================================================================================================
def rel2abs_y(y, axis=None):
    r'''
    Transform relative y-coordinates to absolute y-coordinates. Relative coordinates correspond to a
    fraction of the relevant axis. Be sure to set the limits and scale before calling this function!
    :arguments:
    **y** (``float``, ``list``)
    Relative coordinates.
    :options:
    **axis** ([``plt.gca()``] | ...)
    Specify the axis to which to apply the limits.
    :returns:
    **y** (``float``, ``list``)
    Absolute coordinates.
    '''
    # get current axis
    if axis is None:
        axis = plt.gca()
    # get current limits
    ymin, ymax = axis.get_ylim()
    # transform
    # - log scale (bug fix: this used to check "get_xscale()", so a logarithmic y-axis
    #   combined with a linear x-axis was transformed linearly)
    if axis.get_yscale() == 'log':
        # the bare "except" falls back to the scalar formula when "y" is not iterable
        try : return [10.**(np.log10(ymin)+i*(np.log10(ymax)-np.log10(ymin))) if i is not None else i for i in y]
        except: return 10.**(np.log10(ymin)+y*(np.log10(ymax)-np.log10(ymin)))
    # - normal scale
    else:
        try : return [ymin+i*(ymax-ymin) if i is not None else i for i in y]
        except: return ymin+y*(ymax-ymin)
# ==================================================================================================
def subplots(scale_x=None, scale_y=None, scale=None, **kwargs):
    r'''
    Run ``matplotlib.pyplot.subplots`` with ``figsize`` set to the correct multiple of the default.
    :additional options:
    **scale, scale_x, scale_y** (``<float>``)
    Scale the figure-size (along one of the dimensions).
    '''
    # an explicit figure size overrules all scaling
    if 'figsize' in kwargs:
        return plt.subplots(**kwargs)
    # start from the default figure size
    width, height = mpl.rcParams['figure.figsize']
    # apply isotropic scaling, then per-direction scaling
    if scale is not None:
        width, height = width * scale, height * scale
    if scale_x is not None:
        width = width * scale_x
    if scale_y is not None:
        height = height * scale_y
    # one default-sized panel per row/column
    nrows = kwargs.pop('nrows', 1)
    ncols = kwargs.pop('ncols', 1)
    return plt.subplots(nrows=nrows, ncols=ncols, figsize=(ncols * width, nrows * height), **kwargs)
# ==================================================================================================
def plot(x, y, units='absolute', axis=None, **kwargs):
    r'''
    Plot.
    :arguments:
    **x, y** (``list``)
    Coordinates.
    :options:
    **units** ([``'absolute'``] | ``'relative'``)
    The type of units in which the coordinates are specified. Relative coordinates correspond to a
    fraction of the relevant axis. If you use relative coordinates, be sure to set the limits and
    scale before calling this function!
    ...
    Any ``plt.plot(...)`` option.
    :returns:
    The handle of the ``plt.plot(...)`` command.
    '''
    # default axis
    if axis is None:
        axis = plt.gca()
    # map axis-fraction coordinates to data coordinates when requested
    if units.lower() == 'relative':
        x, y = rel2abs_x(x, axis), rel2abs_y(y, axis)
    return axis.plot(x, y, **kwargs)
# ==================================================================================================
def text(x, y, text, units='absolute', axis=None, **kwargs):
    r'''
    Plot a text.
    :arguments:
    **x, y** (``float``)
    Coordinates.
    **text** (``str``)
    Text to plot.
    :options:
    **units** ([``'absolute'``] | ``'relative'``)
    The type of units in which the coordinates are specified. Relative coordinates correspond to a
    fraction of the relevant axis. If you use relative coordinates, be sure to set the limits and
    scale before calling this function!
    ...
    Any ``plt.text(...)`` option.
    :returns:
    The handle of the ``plt.text(...)`` command.
    '''
    # default axis
    if axis is None:
        axis = plt.gca()
    # map axis-fraction coordinates to data coordinates when requested
    if units.lower() == 'relative':
        x, y = rel2abs_x(x, axis), rel2abs_y(y, axis)
    return axis.text(x, y, text, **kwargs)
# ==================================================================================================
def diagonal_powerlaw(exp, ll=None, lr=None, tl=None, tr=None, width=None, height=None, plot=False, **kwargs):
    r'''
    Set the limits such that a power-law with a certain exponent lies on the diagonal.
    :arguments:
    **exp** (``<float>``)
    The power-law exponent.
    **ll, lr, tl, tr** (``<list>``)
    Coordinates of the lower-left, or the lower-right, or the top-left, or the top-right corner.
    **width, height** (``<float>``)
    Width or the height.
    :options:
    **axis** ([``plt.gca()``] | ...)
    Specify the axis to which to apply the limits.
    **plot** ([``False``] | ``True``)
    Plot the diagonal.
    ...
    Any ``plt.plot(...)`` option.
    :returns:
    The handle of the ``plt.plot(...)`` command (if any).
    '''
    axis = kwargs.pop('axis', plt.gca())
    # all sizes/positions are handled in log-space (natural log) below;
    # exactly one of "width"/"height" must be given (supplying both also raises)
    if width and not height: width = np.log(width )
    elif height and not width : height = np.log(height)
    else: raise IOError('Specify "width" or "height"')
    # exactly one corner must be given; convert it to log-space
    if ll and not lr and not tl and not tr: ll = [ np.log(ll[0]), np.log(ll[1]) ]
    elif lr and not ll and not tl and not tr: lr = [ np.log(lr[0]), np.log(lr[1]) ]
    elif tl and not lr and not ll and not tr: tl = [ np.log(tl[0]), np.log(tl[1]) ]
    elif tr and not lr and not tl and not ll: tr = [ np.log(tr[0]), np.log(tr[1]) ]
    else: raise IOError('Specify "ll" or "lr" or "tl" or "tr"')
    axis.set_xscale('log')
    axis.set_yscale('log')
    # the diagonal has slope |exp| in log-space: derive the missing extent
    if width : height = width * np.abs(exp)
    elif height: width = height / np.abs(exp)
    # anchor the window at the given corner (exp back to linear scale via np.exp)
    if ll:
        axis.set_xlim(sorted([np.exp(ll[0]), np.exp(ll[0]+width )]))
        axis.set_ylim(sorted([np.exp(ll[1]), np.exp(ll[1]+height)]))
    elif lr:
        axis.set_xlim(sorted([np.exp(lr[0]), np.exp(lr[0]-width )]))
        axis.set_ylim(sorted([np.exp(lr[1]), np.exp(lr[1]+height)]))
    elif tl:
        axis.set_xlim(sorted([np.exp(tl[0]), np.exp(tl[0]+width )]))
        axis.set_ylim(sorted([np.exp(tl[1]), np.exp(tl[1]-height)]))
    elif tr:
        axis.set_xlim(sorted([np.exp(tr[0]), np.exp(tr[0]-width )]))
        axis.set_ylim(sorted([np.exp(tr[1]), np.exp(tr[1]-height)]))
    # optionally draw the diagonal itself (in relative coordinates: full window)
    if plot:
        if exp > 0: return plot_powerlaw(exp, 0., 0., 1., **kwargs)
        else : return plot_powerlaw(exp, 0., 1., 1., **kwargs)
# ==================================================================================================
def annotate_powerlaw(text, exp, startx, starty, width=None, rx=0.5, ry=0.5, **kwargs):
    r'''
    Added a label to the middle of a power-law annotation (see ``goosempl.plot_powerlaw``).
    :arguments:
    **exp** (``float``)
    The power-law exponent.
    **startx, starty** (``float``)
    Start coordinates.
    :options:
    **width, height, endx, endy** (``float``)
    Definition of the end coordinate (only one of these options is needed).
    **rx, ry** (``float``)
    Shift in x- and y-direction w.r.t. the default coordinates.
    **units** ([``'relative'``] | ``'absolute'``)
    The type of units in which the coordinates are specified. Relative coordinates correspond to a
    fraction of the relevant axis. If you use relative coordinates, be sure to set the limits and
    scale before calling this function!
    **axis** ([``plt.gca()``] | ...)
    Specify the axis to which to apply the limits.
    ...
    Any ``plt.text(...)`` option.
    :returns:
    The handle of the ``plt.text(...)`` command.
    '''
    # get options/defaults
    # NOTE(review): "plt.gca()" is evaluated eagerly here even when an "axis" is
    # supplied, which may create a figure as a side effect -- confirm intended
    endx = kwargs.pop('endx' , None )
    endy = kwargs.pop('endy' , None )
    height = kwargs.pop('height', None )
    units = kwargs.pop('units' , 'relative')
    axis = kwargs.pop('axis' , plt.gca() )
    # check: a power-law is a straight line only on a log-log scale
    if axis.get_xscale() != 'log' or axis.get_yscale() != 'log':
        raise IOError('This function only works on a log-log scale, where the power-law is a straight line')
    # apply width/height: derive the missing end-coordinate from the given extent
    if width is not None:
        endx = startx + width
        endy = None
    elif height is not None:
        if exp > 0: endy = starty + height
        elif exp == 0: endy = starty
        else : endy = starty - height
        endx = None
    # transform relative (axis-fraction) coordinates to data coordinates
    if units.lower() == 'relative':
        [startx, endx] = rel2abs_x([startx, endx], axis)
        [starty, endy] = rel2abs_y([starty, endy], axis)
    # determine multiplication constant of y = const * x**exp through the start point
    const = starty / ( startx**exp )
    # get end x/y-coordinate: whichever of the two is still unknown
    if endx is not None: endy = const * endx**exp
    else : endx = ( endy / const )**( 1/exp )
    # middle: position the label at fraction (rx, ry) along the segment, in log-space
    x = 10. ** ( np.log10(startx) + rx * ( np.log10(endx) - np.log10(startx) ) )
    y = 10. ** ( np.log10(starty) + ry * ( np.log10(endy) - np.log10(starty) ) )
    # plot
    return axis.text(x, y, text, **kwargs)
# ==================================================================================================
def plot_powerlaw(exp, startx, starty, width=None, **kwargs):
    r'''
    Plot a power-law.
    :arguments:
    **exp** (``float``)
    The power-law exponent.
    **startx, starty** (``float``)
    Start coordinates.
    :options:
    **width, height, endx, endy** (``float``)
    Definition of the end coordinate (only one of these options is needed).
    **units** ([``'relative'``] | ``'absolute'``)
    The type of units in which the coordinates are specified. Relative coordinates correspond to a
    fraction of the relevant axis. If you use relative coordinates, be sure to set the limits and
    scale before calling this function!
    **axis** ([``plt.gca()``] | ...)
    Specify the axis to which to apply the limits.
    ...
    Any ``plt.plot(...)`` option.
    :returns:
    The handle of the ``plt.plot(...)`` command.
    '''
    # get options/defaults
    # NOTE(review): "plt.gca()" is evaluated eagerly here even when an "axis" is
    # supplied, which may create a figure as a side effect -- confirm intended
    endx = kwargs.pop('endx' , None )
    endy = kwargs.pop('endy' , None )
    height = kwargs.pop('height', None )
    units = kwargs.pop('units' , 'relative')
    axis = kwargs.pop('axis' , plt.gca() )
    # check: a power-law is a straight line only on a log-log scale
    if axis.get_xscale() != 'log' or axis.get_yscale() != 'log':
        raise IOError('This function only works on a log-log scale, where the power-law is a straight line')
    # apply width/height: derive the missing end-coordinate from the given extent
    if width is not None:
        endx = startx + width
        endy = None
    elif height is not None:
        if exp > 0: endy = starty + height
        elif exp == 0: endy = starty
        else : endy = starty - height
        endx = None
    # transform relative (axis-fraction) coordinates to data coordinates
    if units.lower() == 'relative':
        [startx, endx] = rel2abs_x([startx, endx], axis)
        [starty, endy] = rel2abs_y([starty, endy], axis)
    # determine multiplication constant of y = const * x**exp through the start point
    const = starty / ( startx**exp )
    # get end x/y-coordinate: whichever of the two is still unknown
    if endx is not None: endy = const * endx**exp
    else : endx = ( endy / const )**( 1/exp )
    # plot the straight segment between start and end
    return axis.plot([startx, endx], [starty, endy], **kwargs)
# ==================================================================================================
def grid_powerlaw(exp, insert=0, skip=0, end=-1, step=0, axis=None, **kwargs):
    r'''
    Draw a power-law grid: a grid that respects a certain power-law exponent. The grid-lines start
    from the positions of the ticks.

    :arguments:

      **exp** (``float``)
        The power-law exponent.

    :options:

      **insert** (``<int>``)
        Insert extra lines in between the default lines set by the tick positions.

      **skip, end, step** (``<int>``)
        Select from the lines based on ``coor = coor[skip:end:step]``.

      **axis** ([``plt.gca()``] | ...)
        Specify the axis to which to apply the limits.

      ...
        Any ``plt.plot(...)`` option.

    :returns:

      The handle of the ``plt.plot(...)`` command.
    '''

    # default axis
    if axis is None: axis = plt.gca()

    # default plot settings: thin dashed black lines
    kwargs.setdefault('color', 'k')
    kwargs.setdefault('linestyle', '--')
    kwargs.setdefault('linewidth', 1)

    # check: a power-law is only a straight line on a log-log scale
    if axis.get_xscale() != 'log' or axis.get_yscale() != 'log':
        raise IOError('This function only works on a log-log scale, where the power-law is a straight line')

    # zero-exponent: draw horizontal lines
    if exp == 0:
        # y-coordinate of the start positions: the ticks, in relative [0, 1] coordinates
        starty = abs2rel_y(axis.get_yticks(), axis=axis)
        # insert extra coordinates: linear interpolation in between the tick positions
        if insert > 0:
            n = len(starty)
            x = np.linspace(0, 1, n + (n - 1) * int(insert))
            xp = np.linspace(0, 1, n)
            starty = np.interp(x, xp, starty)
        # skip coordinates, per the "skip"/"end"/"step" selection
        starty = starty[int(skip):int(end):int(1 + step)]
        # set remaining coordinates: each line spans the full width of the axis
        endy = starty
        startx = np.zeros((len(starty)))
        endx = np.ones((len(starty)))
    # all other exponents
    else:
        # get the axis' size in real coordinates
        # - get the limits
        xmin, xmax = axis.get_xlim()
        ymin, ymax = axis.get_ylim()
        # - compute the size in both directions (in decades)
        deltax = np.log10(xmax) - np.log10(xmin)
        deltay = np.log10(ymax) - np.log10(ymin)
        # convert the exponent in real coordinates to a slope in relative coordinates
        b = np.abs(exp) * deltax / deltay
        # x-coordinate of the start positions: the ticks, in relative [0, 1] coordinates
        startx = abs2rel_x(axis.get_xticks(), axis=axis)
        # compute how many lines need to be prepended: lines that start left of the axis but
        # still cross it (a line needs a relative run of 1/b to traverse the full height)
        Dx = startx[1] - startx[0]
        nneg = int(np.floor(1. / (b * Dx))) - 1
        # add extra to be sure
        if insert > 0:
            nneg += 1
        # prepend: extend the start positions to the left with the same tick spacing
        if nneg > 0:
            startx = np.hstack((startx[0] + np.cumsum(-Dx * np.ones((nneg)))[::-1], startx))
        # insert extra coordinates: linear interpolation in between the tick positions
        if insert > 0:
            n = len(startx)
            x = np.linspace(0, 1, n + (n - 1) * int(insert))
            xp = np.linspace(0, 1, n)
            startx = np.interp(x, xp, startx)
        # skip coordinates, per the "skip"/"step" selection
        if step > 0:
            startx = startx[int(skip)::int(1 + step)]
        # x-coordinate of the end of the lines: where each line has crossed the full height
        endx = startx + 1 / b
        # y-coordinate of the start and the end of the lines: bottom-to-top for a positive
        # exponent, top-to-bottom for a negative one
        if exp > 0:
            starty = np.zeros((len(startx)))
            endy = np.ones((len(startx)))
        else:
            starty = np.ones((len(startx)))
            endy = np.zeros((len(startx)))

    # convert the relative coordinates back to real (data) coordinates
    startx = rel2abs_x(startx, axis)
    endx = rel2abs_x(endx, axis)
    starty = rel2abs_y(starty, axis)
    endy = rel2abs_y(endy, axis)

    # plot: one call, one column per grid-line
    lines = axis.plot(np.vstack((startx, endx)), np.vstack((starty, endy)), **kwargs)

    # hide duplicate legend labels: keep only the first line's label
    plt.setp(lines[1:], label="_")

    # return handles
    return lines
# ==================================================================================================
def histogram_bin_edges_minwidth(min_width, bins):
    r'''
    Merge bins with their right-neighbour until every remaining bin is at least ``min_width`` wide.

    :arguments:

      **bins** (``<array_like>``)
        The bin-edges.

      **min_width** (``<float>``)
        The minimum bin width.
    '''

    # nothing requested: pass the edges through unchanged
    if min_width is None or min_width is False:
        return bins

    # remove one edge at a time until no bin is narrower than "min_width"
    while True:
        narrow = np.where(np.diff(bins) < min_width)[0]
        # all bins wide enough: done
        if narrow.size == 0:
            return bins
        first = narrow[0]
        if first + 1 == len(bins) - 1:
            # the narrow bin is the last one: absorb it into its left neighbour
            bins = np.hstack((bins[:first], bins[-1]))
        else:
            # absorb the narrow bin into its right neighbour
            bins = np.hstack((bins[:first + 1], bins[(first + 2):]))
# ==================================================================================================
def histogram_bin_edges_mincount(data, min_count, bins):
    r'''
    Merge bins with their right-neighbour until every remaining bin holds at least ``min_count``
    data-points.

    :arguments:

      **data** (``<array_like>``)
        Input data. The histogram is computed over the flattened array.

      **bins** (``<array_like>`` | ``<int>``)
        The bin-edges (or the number of bins, automatically converted to equal-sized bins).

      **min_count** (``<int>``)
        The minimum number of data-points per bin.
    '''

    # nothing requested: pass the edges through unchanged
    if min_count is None or min_count is False:
        return bins

    # check
    if type(min_count) != int: raise IOError('"min_count" must be an integer number')

    # remove one edge at a time until every bin holds at least "min_count" data-points
    while True:
        P, _ = np.histogram(data, bins=bins, density=False)
        sparse = np.where(P < min_count)[0]
        # all bins full enough: done
        if sparse.size == 0:
            return bins
        first = sparse[0]
        if first + 1 == len(P):
            # the sparse bin is the last one: absorb it into its left neighbour
            bins = np.hstack((bins[:first], bins[-1]))
        else:
            # absorb the sparse bin into its right neighbour
            bins = np.hstack((bins[:first + 1], bins[(first + 2):]))
# ==================================================================================================
def histogram_bin_edges(data, bins=10, mode='equal', min_count=None, integer=False, remove_empty_edges=True, min_width=None):
    r'''
    Determine bin-edges for a histogram.

    :arguments:

      **data** (``<array_like>``)
        Input data. The histogram is computed over the flattened array.

    :options:

      **bins** ([``10``] | ``<int>``)
        The number of bins.

      **mode** ([``'equal'`` | ``<str>``)
        Mode with which to compute the bin-edges:
        * ``'equal'``: each bin has equal width.
        * ``'log'``: logarithmic spacing.
        * ``'uniform'``: uniform number of data-points per bin.

      **min_count** (``<int>``)
        The minimum number of data-points per bin.

      **min_width** (``<float>``)
        The minimum width of each bin.

      **integer** ([``False``] | [``True``])
        If ``True``, bins not encompassing an integer are removed
        (e.g. a bin with edges ``[1.1, 1.9]`` is removed, but ``[0.9, 1.1]`` is not removed).

      **remove_empty_edges** ([``True``] | [``False``])
        Remove empty bins at the beginning or the end.

    :returns:

      **bin_edges** (``<array of dtype float>``)
        The edges to pass into histogram.
    '''

    # determine the bin-edges
    if mode == 'equal':
        bin_edges = np.linspace(np.min(data), np.max(data), bins + 1)
    elif mode == 'log':
        bin_edges = np.logspace(np.log10(np.min(data)), np.log10(np.max(data)), bins + 1)
    elif mode == 'uniform':
        # - only the number of bins can be prescribed in this mode
        if hasattr(bins, "__len__"):
            raise IOError('Only the number of bins can be specified')
        # - use the minimum count to estimate the number of bins
        if min_count is not None and min_count is not False:
            if type(min_count) != int: raise IOError('"min_count" must be an integer number')
            bins = int(np.floor(float(len(data)) / float(min_count)))
        # - number of data-points in each bin (equal for each)
        count = int(np.floor(float(len(data)) / float(bins))) * np.ones(bins, dtype='int')
        # - increase the number of data-points by one in as many bins as needed,
        #   such that the total fits the total number of data-points
        #   (bug-fix: "np.int" was deprecated in NumPy 1.20 and removed in 1.24; use the
        #   built-in "int" instead)
        count[np.linspace(0, bins - 1, len(data) - np.sum(count)).astype(int)] += 1
        # - split the data: indices into the sorted data that bound each bin
        idx = np.empty((bins + 1), dtype='int')
        idx[0] = 0
        idx[1:] = np.cumsum(count)
        idx[-1] = len(data) - 1
        # - determine the bin-edges (duplicates collapse for heavily repeated data)
        bin_edges = np.unique(np.sort(data)[idx])
    else:
        raise IOError('Unknown option')

    # remove empty starting and ending bin (related to an unfortunate choice of bin-edges)
    if remove_empty_edges:
        N, _ = np.histogram(data, bins=bin_edges, density=False)
        idx = np.min(np.where(N > 0)[0])
        jdx = np.max(np.where(N > 0)[0])
        bin_edges = bin_edges[(idx):(jdx + 2)]

    # merge bins with too few data-points (if needed)
    bin_edges = histogram_bin_edges_mincount(data, min_count=min_count, bins=bin_edges)

    # merge bins that have too small of a width
    bin_edges = histogram_bin_edges_minwidth(min_width=min_width, bins=bin_edges)

    # select only bins that encompass an integer (and retain the original bounds)
    if integer:
        idx = np.where(np.diff(np.floor(bin_edges)) >= 1)[0]
        idx = np.unique(np.hstack((0, idx, len(bin_edges) - 1)))
        bin_edges = bin_edges[idx]

    # return
    return bin_edges
# ==================================================================================================
def histogram(data, return_edges=True, **kwargs):
    r'''
    Compute a histogram: a thin wrapper around ``numpy.histogram`` that can return bin mid-points.

    See `numpy.histrogram <https://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html>`_

    :extra options:

      **return_edges** ([``True``] | [``False``])
        Return the bin edges if set to ``True``, return their midpoints otherwise.
    '''

    # delegate the actual binning to NumPy
    counts, edges = np.histogram(data, **kwargs)

    # default output: counts and edges, exactly as "numpy.histogram"
    if return_edges:
        return counts, edges

    # mid-point of each bin: the left edge plus half of the bin width
    midpoints = edges[:-1] + .5 * np.diff(edges)

    return counts, midpoints
# ==================================================================================================
def histogram_cumulative(data, **kwargs):
    r'''
    Compute cumulative histogram.

    See `numpy.histrogram <https://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html>`_

    :extra options:

      **return_edges** ([``True``] | [``False``])
        Return the bin edges if set to ``True``, return their midpoints otherwise.

      **normalize** ([``False``] | ``True``)
        Normalize such that the final probability is one. In this case the function returns the
        (binned) cumulative probability density.
    '''

    # options consumed here; everything else is forwarded to "numpy.histogram"
    return_edges = kwargs.pop('return_edges', True)
    normalize = kwargs.pop('normalize', False)

    counts, edges = np.histogram(data, **kwargs)

    # accumulate the counts over the bins
    P = np.cumsum(counts)

    # optionally scale such that the final value equals one
    if normalize:
        P = P / P[-1]

    # optionally replace the edges by the mid-point of each bin
    if not return_edges:
        edges = edges[:-1] + .5 * np.diff(edges)

    return P, edges
# ==================================================================================================
def hist(P, edges, **kwargs):
    r'''
    Plot a histogram as a collection of rectangular patches: one rectangle per bin.

    :arguments:

      **P** (``<array_like>``)
        Count (or density) per bin.

      **edges** (``<array_like>``)
        The bin-edges (one entry more than ``P``).

    :options:

      **axis** ([``plt.gca()``] | ...)
        Specify the axis to plot in.

      **cindex** (``<numpy.ndarray>``)
        Array with, for each patch, the value that should be indexed to a color.

      **autoscale** ([``True``] | ``False``)
        Automatically update the limits of the plot (automatic limits of Collections are not
        supported by matplotlib).

      ...
        Any ``PatchCollection`` option.

    :returns:

      The handle of the ``PatchCollection``.
    '''

    from matplotlib.collections import PatchCollection
    from matplotlib.patches import Polygon

    # extract local options
    # (bug-fix: evaluate "plt.gca()" only when no axis is given; the old eager default touched,
    # or even created, the "current" figure even when an axis was supplied)
    axis = kwargs.pop('axis', None)
    cindex = kwargs.pop('cindex', None)
    autoscale = kwargs.pop('autoscale', True)
    if axis is None: axis = plt.gca()

    # set defaults
    kwargs.setdefault('edgecolor', 'k')

    # no color-index -> set transparent
    if cindex is None:
        kwargs.setdefault('facecolor', (0., 0., 0., 0.))

    # one rectangle per bin: (left, 0) -> (right, 0) -> (right, count) -> (left, count)
    poly = [
        Polygon(np.array([[xl, 0.], [xu, 0.], [xu, p], [xl, p]]))
        for p, xl, xu in zip(P, edges[:-1], edges[1:])
    ]

    # convert patches -> matplotlib-objects
    p = PatchCollection(poly, **kwargs)

    # add colors to patches
    if cindex is not None:
        p.set_array(cindex)

    # add patches to axis
    axis.add_collection(p)

    # rescale the axes manually
    if autoscale:
        # - get limits
        xlim = [edges[0], edges[-1]]
        ylim = [0, np.max(P)]
        # - set limits +/- 10% extra margin
        axis.set_xlim([xlim[0] - .1 * (xlim[1] - xlim[0]), xlim[1] + .1 * (xlim[1] - xlim[0])])
        axis.set_ylim([ylim[0] - .1 * (ylim[1] - ylim[0]), ylim[1] + .1 * (ylim[1] - ylim[0])])

    return p
# ==================================================================================================
def cdf(data, mode='continuous', **kwargs):
    '''
    Return the empirical cumulative density of the input data.

    :arguments:

      **data** (``<numpy.ndarray>``)
        Input data, to plot the distribution for.

    :returns:

      **P** (``<numpy.ndarray>``)
        Cumulative probability.

      **x** (``<numpy.ndarray>``)
        Data points.
    '''

    # NOTE: "mode" and "kwargs" are currently unused; kept for backwards compatibility
    P = np.linspace(0.0, 1.0, len(data))
    x = np.sort(data)
    return (P, x)
# ==================================================================================================
def patch(*args, **kwargs):
    '''
    Add patches to plot. The color of the patches is indexed according to a specified color-index.

    :example:

      Plot a finite element mesh: the outline of the undeformed configuration, and the deformed
      configuration for which the elements get a color e.g. based on stress::

        import matplotlib.pyplot as plt
        import goosempl as gplt

        fig,ax = plt.subplots()

        p = gplt.patch(coor=coor+disp,conn=conn,axis=ax,cindex=stress,cmap='YlOrRd',edgecolor=None)
        _ = gplt.patch(coor=coor     ,conn=conn,axis=ax)

        cbar = fig.colorbar(p,axis=ax,aspect=10)

        plt.show()

    :arguments:

      **coor** (``<numpy.ndarray>`` | ``<list>`` (nested))
        Matrix with on each row the coordinates (positions) of each node.

      **conn** (``<numpy.ndarray>`` | ``<list>`` (nested))
        Matrix with on each row the number numbers (rows in ``coor``) which form an element
        (patch).

    :options:

      **cindex** (``<numpy.ndarray>``)
        Array with, for each patch, the value that should be indexed to a color.

      **axis** (``<matplotlib>``)
        Specify an axis to include to plot in. By default the current axis is used.

      **autoscale** ([``True``] | ``False``)
        Automatically update the limits of the plot (currently automatic limits of Collections
        are not supported by matplotlib).

    :recommended options:

      **cmap** (``<str>`` | ...)
        Specify a colormap.

      **linewidth** (``<float>``)
        Width of the edges.

      **edgecolor** (``<str>`` | ...)
        Color of the edges.

      **clim** (``(<float>,<float>)``)
        Lower and upper limit of the color-axis.

    :returns:

      **handle** (``<matplotlib>``)
        Handle of the patch objects.

    .. seealso::

      * `matplotlib example
        <http://matplotlib.org/examples/api/patch_collection.html>`_.
    '''

    from matplotlib.collections import PatchCollection
    from matplotlib.patches import Polygon

    # check dependent options: "coor" and "conn" are mandatory
    if 'coor' not in kwargs or 'conn' not in kwargs:
        raise IOError('Specify both "coor" and "conn"')

    # extract local options
    # (bug-fix: evaluate "plt.gca()" only when no axis is given; the old eager default touched,
    # or even created, the "current" figure even when an axis was supplied)
    axis = kwargs.pop('axis', None)
    cindex = kwargs.pop('cindex', None)
    coor = kwargs.pop('coor', None)
    conn = kwargs.pop('conn', None)
    autoscale = kwargs.pop('autoscale', True)
    if axis is None: axis = plt.gca()

    # set defaults
    kwargs.setdefault('edgecolor', 'k')

    # no color-index -> set transparent
    if cindex is None:
        kwargs.setdefault('facecolor', (0., 0., 0., 0.))

    # convert mesh -> list of Polygons (one per row of the connectivity)
    poly = [Polygon(coor[iconn, :]) for iconn in conn]

    # (bug-fix: this used to read "tuple(poly, *args)", which raises "TypeError: tuple expected
    # at most 1 argument" whenever positional arguments are passed; since "coor"/"conn" are
    # mandatory, the constructed polygons are the patches to draw)
    args = tuple(poly)

    # convert patches -> matplotlib-objects
    p = PatchCollection(args, **kwargs)

    # add colors to patches
    if cindex is not None:
        p.set_array(cindex)

    # add patches to axis
    axis.add_collection(p)

    # rescale the axes manually
    if autoscale:
        # - get limits
        xlim = [np.min(coor[:, 0]), np.max(coor[:, 0])]
        ylim = [np.min(coor[:, 1]), np.max(coor[:, 1])]
        # - set limits +/- 10% extra margin
        axis.set_xlim([xlim[0] - .1 * (xlim[1] - xlim[0]), xlim[1] + .1 * (xlim[1] - xlim[0])])
        axis.set_ylim([ylim[0] - .1 * (ylim[1] - ylim[0]), ylim[1] + .1 * (ylim[1] - ylim[0])])

    return p
# ==================================================================================================
|
tdegeus/GooseMPL | GooseMPL/__init__.py | scale_lim | python | def scale_lim(lim,factor=1.05):
r'''
Scale limits to be 5% wider, to have a nice plot.
:arguments:
**lim** (``<list>`` | ``<str>``)
The limits. May be a string "[...,...]", which is converted to a list.
:options:
**factor** ([``1.05``] | ``<float>``)
Scale factor.
'''
# convert string "[...,...]"
if type(lim) == str: lim = eval(lim)
# scale limits
D = lim[1] - lim[0]
lim[0] -= (factor-1.)/2. * D
lim[1] += (factor-1.)/2. * D
return lim | r'''
Scale limits to be 5% wider, to have a nice plot.
:arguments:
**lim** (``<list>`` | ``<str>``)
The limits. May be a string "[...,...]", which is converted to a list.
:options:
**factor** ([``1.05``] | ``<float>``)
Scale factor. | train | https://github.com/tdegeus/GooseMPL/blob/16e1e06cbcf7131ac98c03ca7251ce83734ef905/GooseMPL/__init__.py#L180-L203 | null | '''
This module provides some extensions to matplotlib.
:dependencies:
* numpy
* matplotlib
:copyright:
| Tom de Geus
| tom@geus.me
| http://www.geus.me
'''
# ==================================================================================================
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
import os,re,sys
# ==================================================================================================
def find_latex_font_serif():
    r'''
    Find an available font to mimic LaTeX, and return its name.
    '''

    import os, re
    import matplotlib.font_manager

    # strip the path and the extension from a font-file, keeping only the family name
    def family(font):
        return os.path.splitext(os.path.split(font)[-1])[0].split(' - ')[0]

    # all system fonts known to matplotlib
    fonts = matplotlib.font_manager.findSystemFonts(fontpaths=None, fontext='ttf')

    # candidate families, the best match first
    priority = [
        r'.*Computer\ Modern\ Roman.*',
        r'.*CMU\ Serif.*',
        r'.*CMU.*',
        r'.*Times.*',
        r'.*DejaVu.*',
        r'.*Serif.*',
    ]

    # return the first font matching the most preferred pattern
    for pattern in priority:
        for font in fonts:
            if re.match(pattern, font):
                return family(font)

    # nothing suitable found
    return None
# --------------------------------------------------------------------------------------------------
def copy_style():
    r'''
    Write all goose-styles to the relevant matplotlib configuration directory.
    '''

    import os
    import matplotlib

    # style definitions
    # -----------------

    # each entry maps a style-file name to the contents written to that file
    styles = {}

    # the main style: sensible figure/font/legend defaults
    styles['goose.mplstyle'] = '''
figure.figsize : 8,6
font.weight : normal
font.size : 16
axes.labelsize : medium
axes.titlesize : medium
xtick.labelsize : small
ytick.labelsize : small
xtick.top : True
ytick.right : True
axes.facecolor : none
axes.prop_cycle : cycler('color',['k', 'r', 'g', 'b', 'y', 'c', 'm'])
legend.fontsize : medium
legend.fancybox : true
legend.columnspacing : 1.0
legend.handletextpad : 0.2
lines.linewidth : 2
image.cmap : afmhot
image.interpolation : nearest
image.origin : lower
savefig.facecolor : none
figure.autolayout : True
errorbar.capsize : 2
'''

    # variant: ticks pointing inwards
    styles['goose-tick-in.mplstyle'] = '''
xtick.direction : in
ytick.direction : in
'''

    # variant: ticks only on the lower/left spines
    styles['goose-tick-lower.mplstyle'] = '''
xtick.top : False
ytick.right : False
axes.spines.top : False
axes.spines.right : False
'''

    # LaTeX-like typesetting: use a serif font mimicking Computer Modern when available
    # (call the lookup once instead of twice)
    serif = find_latex_font_serif()

    if serif is not None:
        styles['goose-latex.mplstyle'] = r'''
font.family : serif
font.serif : {serif:s}
font.weight : bold
font.size : 18
text.usetex : true
text.latex.preamble : \usepackage{{amsmath}},\usepackage{{amsfonts}},\usepackage{{amssymb}},\usepackage{{bm}}
'''.format(serif=serif)
    else:
        # bug-fix: this branch is never passed through ".format", so the braces must be single,
        # otherwise the written file literally contains "{{amsmath}}" (an invalid preamble)
        styles['goose-latex.mplstyle'] = r'''
font.family : serif
font.weight : bold
font.size : 18
text.usetex : true
text.latex.preamble : \usepackage{amsmath},\usepackage{amsfonts},\usepackage{amssymb},\usepackage{bm}
'''

    # write style definitions
    # -----------------------

    # directory name where the styles are stored
    dirname = os.path.abspath(os.path.join(matplotlib.get_configdir(), 'stylelib'))

    # make directory if it does not yet exist
    if not os.path.isdir(dirname): os.makedirs(dirname)

    # write all styles (bug-fix: close each file handle deterministically)
    for fname, style in styles.items():
        with open(os.path.join(dirname, fname), 'w') as handle:
            handle.write(style)
# ==================================================================================================
def set_decade_lims(axis=None, direction=None):
    r'''
    Expand the limits to the nearest full decades (floor the lower limit, ceil the upper one).

    :options:

      **axis** ([``plt.gca()``] | ...)
        Specify the axis to which to apply the limits.

      **direction** ([``None``] | ``'x'`` | ``'y'``)
        Limit the application to a certain direction (default: both).
    '''

    # default axis
    if axis is None:
        axis = plt.gca()

    # round a pair of limits down/up to full powers of ten
    expand = lambda lo, hi: [10. ** np.floor(np.log10(lo)), 10. ** np.ceil(np.log10(hi))]

    # x-axis
    if direction in (None, 'x'):
        axis.set_xlim(expand(*axis.get_xlim()))

    # y-axis
    if direction in (None, 'y'):
        axis.set_ylim(expand(*axis.get_ylim()))
# ==================================================================================================
# ==================================================================================================
def abs2rel_x(x, axis=None):
    r'''
    Transform absolute x-coordinates to relative x-coordinates. Relative coordinates correspond
    to a fraction of the relevant axis. Be sure to set the limits and scale before calling this
    function!

    :arguments:

      **x** (``float``, ``list``)
        Absolute coordinates.

    :options:

      **axis** ([``plt.gca()``] | ...)
        Specify the axis to which to apply the limits.

    :returns:

      **x** (``float``, ``list``)
        Relative coordinates.
    '''

    # default axis
    if axis is None:
        axis = plt.gca()

    # current limits
    xmin, xmax = axis.get_xlim()

    # scalar transform, depending on the scale of the axis
    if axis.get_xscale() == 'log':
        lo, hi = np.log10(xmin), np.log10(xmax)
        convert = lambda i: (np.log10(i) - lo) / (hi - lo)
    else:
        convert = lambda i: (i - xmin) / (xmax - xmin)

    # list-like input: convert item-by-item ("None" entries are passed through untouched);
    # scalar input: the iteration raises a TypeError, convert directly
    try:
        return [convert(i) if i is not None else i for i in x]
    except TypeError:
        return convert(x)
# ==================================================================================================
def abs2rel_y(y, axis=None):
    r'''
    Transform absolute y-coordinates to relative y-coordinates. Relative coordinates correspond
    to a fraction of the relevant axis. Be sure to set the limits and scale before calling this
    function!

    :arguments:

      **y** (``float``, ``list``)
        Absolute coordinates.

    :options:

      **axis** ([``plt.gca()``] | ...)
        Specify the axis to which to apply the limits.

    :returns:

      **y** (``float``, ``list``)
        Relative coordinates.
    '''

    # get current axis
    if axis is None:
        axis = plt.gca()

    # get current limits
    ymin, ymax = axis.get_ylim()

    # transform
    # - log scale
    #   (bug-fix: the scale of the *y*-axis is relevant here; this used to read "get_xscale",
    #   giving wrong results on axes with mixed scales)
    if axis.get_yscale() == 'log':
        # list-like input: transform item-by-item, passing "None" entries through untouched;
        # scalar input: the iteration raises, fall back to the direct transform
        try : return [(np.log10(i)-np.log10(ymin))/(np.log10(ymax)-np.log10(ymin)) if i is not None else i for i in y]
        except: return (np.log10(y)-np.log10(ymin))/(np.log10(ymax)-np.log10(ymin))
    # - normal scale
    else:
        try : return [(i-ymin)/(ymax-ymin) if i is not None else i for i in y]
        except: return (y-ymin)/(ymax-ymin)
# ==================================================================================================
def rel2abs_x(x, axis=None):
    r'''
    Transform relative x-coordinates to absolute x-coordinates. Relative coordinates correspond
    to a fraction of the relevant axis. Be sure to set the limits and scale before calling this
    function!

    :arguments:

      **x** (``float``, ``list``)
        Relative coordinates.

    :options:

      **axis** ([``plt.gca()``] | ...)
        Specify the axis to which to apply the limits.

    :returns:

      **x** (``float``, ``list``)
        Absolute coordinates.
    '''

    # default axis
    if axis is None:
        axis = plt.gca()

    # current limits
    xmin, xmax = axis.get_xlim()

    # scalar transform, depending on the scale of the axis
    if axis.get_xscale() == 'log':
        lo, hi = np.log10(xmin), np.log10(xmax)
        convert = lambda i: 10. ** (lo + i * (hi - lo))
    else:
        convert = lambda i: xmin + i * (xmax - xmin)

    # list-like input: convert item-by-item ("None" entries are passed through untouched);
    # scalar input: the iteration raises a TypeError, convert directly
    try:
        return [convert(i) if i is not None else i for i in x]
    except TypeError:
        return convert(x)
# ==================================================================================================
def rel2abs_y(y, axis=None):
    r'''
    Transform relative y-coordinates to absolute y-coordinates. Relative coordinates correspond
    to a fraction of the relevant axis. Be sure to set the limits and scale before calling this
    function!

    :arguments:

      **y** (``float``, ``list``)
        Relative coordinates.

    :options:

      **axis** ([``plt.gca()``] | ...)
        Specify the axis to which to apply the limits.

    :returns:

      **y** (``float``, ``list``)
        Absolute coordinates.
    '''

    # get current axis
    if axis is None:
        axis = plt.gca()

    # get current limits
    ymin, ymax = axis.get_ylim()

    # transform
    # - log scale
    #   (bug-fix: the scale of the *y*-axis is relevant here; this used to read "get_xscale",
    #   giving wrong results on axes with mixed scales)
    if axis.get_yscale() == 'log':
        # list-like input: transform item-by-item, passing "None" entries through untouched;
        # scalar input: the iteration raises, fall back to the direct transform
        try : return [10.**(np.log10(ymin)+i*(np.log10(ymax)-np.log10(ymin))) if i is not None else i for i in y]
        except: return 10.**(np.log10(ymin)+y*(np.log10(ymax)-np.log10(ymin)))
    # - normal scale
    else:
        try : return [ymin+i*(ymax-ymin) if i is not None else i for i in y]
        except: return ymin+y*(ymax-ymin)
# ==================================================================================================
def subplots(scale_x=None, scale_y=None, scale=None, **kwargs):
    r'''
    Run ``matplotlib.pyplot.subplots`` with ``figsize`` set to the correct multiple of the
    default.

    :additional options:

      **scale, scale_x, scale_y** (``<float>``)
        Scale the figure-size (along one of the dimensions).
    '''

    # an explicit "figsize" wins: simply forward everything
    if 'figsize' in kwargs:
        return plt.subplots(**kwargs)

    # start from the default figure-size
    width, height = mpl.rcParams['figure.figsize']

    # apply the requested scaling
    if scale is not None:
        width, height = width * scale, height * scale
    if scale_x is not None:
        width = width * scale_x
    if scale_y is not None:
        height = height * scale_y

    # one default-sized panel per sub-plot
    nrows = kwargs.pop('nrows', 1)
    ncols = kwargs.pop('ncols', 1)

    return plt.subplots(nrows=nrows, ncols=ncols, figsize=(ncols * width, nrows * height), **kwargs)
# ==================================================================================================
def plot(x, y, units='absolute', axis=None, **kwargs):
    r'''
    Plot, allowing the coordinates to be specified in relative units.

    :arguments:

      **x, y** (``list``)
        Coordinates.

    :options:

      **units** ([``'absolute'``] | ``'relative'``)
        The type of units in which the coordinates are specified. Relative coordinates correspond
        to a fraction of the relevant axis. If you use relative coordinates, be sure to set the
        limits and scale before calling this function!

      ...
        Any ``plt.plot(...)`` option.

    :returns:

      The handle of the ``plt.plot(...)`` command.
    '''

    # default axis
    if axis is None:
        axis = plt.gca()

    # relative coordinates: convert them to absolute coordinates first
    if units.lower() == 'relative':
        x = rel2abs_x(x, axis)
        y = rel2abs_y(y, axis)

    return axis.plot(x, y, **kwargs)
# ==================================================================================================
def text(x, y, text, units='absolute', axis=None, **kwargs):
    r'''
    Plot a text, allowing the coordinates to be specified in relative units.

    :arguments:

      **x, y** (``float``)
        Coordinates.

      **text** (``str``)
        Text to plot.

    :options:

      **units** ([``'absolute'``] | ``'relative'``)
        The type of units in which the coordinates are specified. Relative coordinates correspond
        to a fraction of the relevant axis. If you use relative coordinates, be sure to set the
        limits and scale before calling this function!

      ...
        Any ``plt.text(...)`` option.

    :returns:

      The handle of the ``plt.text(...)`` command.
    '''

    # default axis
    if axis is None:
        axis = plt.gca()

    # relative coordinates: convert them to absolute coordinates first
    if units.lower() == 'relative':
        x = rel2abs_x(x, axis)
        y = rel2abs_y(y, axis)

    return axis.text(x, y, text, **kwargs)
# ==================================================================================================
def diagonal_powerlaw(exp, ll=None, lr=None, tl=None, tr=None, width=None, height=None, plot=False, **kwargs):
    r'''
    Set the limits such that a power-law with a certain exponent lies on the diagonal.

    :arguments:

      **exp** (``<float>``)
        The power-law exponent.

      **ll, lr, tl, tr** (``<list>``)
        Coordinates of the lower-left, or the lower-right, or the top-left, or the top-right
        corner.

      **width, height** (``<float>``)
        Width or the height.

    :options:

      **axis** ([``plt.gca()``] | ...)
        Specify the axis to which to apply the limits.

      **plot** ([``False``] | ``True``)
        Plot the diagonal.

      ...
        Any ``plt.plot(...)`` option.

    :returns:

      The handle of the ``plt.plot(...)`` command (if any).
    '''

    # get the axis
    # (bug-fix: evaluate "plt.gca()" only when no axis is given; the old eager default touched,
    # or even created, the "current" figure even when an axis was supplied)
    axis = kwargs.pop('axis', None)
    if axis is None: axis = plt.gca()

    # transform the size to log-space: exactly one of the two must be given
    if width and not height: width = np.log(width )
    elif height and not width : height = np.log(height)
    else: raise IOError('Specify "width" or "height"')

    # transform the corner to log-space: exactly one of the four must be given
    if ll and not lr and not tl and not tr: ll = [ np.log(ll[0]), np.log(ll[1]) ]
    elif lr and not ll and not tl and not tr: lr = [ np.log(lr[0]), np.log(lr[1]) ]
    elif tl and not lr and not ll and not tr: tl = [ np.log(tl[0]), np.log(tl[1]) ]
    elif tr and not lr and not tl and not ll: tr = [ np.log(tr[0]), np.log(tr[1]) ]
    else: raise IOError('Specify "ll" or "lr" or "tl" or "tr"')

    # the power-law is only straight on a log-log scale
    axis.set_xscale('log')
    axis.set_yscale('log')

    # complete the missing dimension from the exponent (the slope of the diagonal)
    if width : height = width * np.abs(exp)
    elif height: width = height / np.abs(exp)

    # set the limits, growing the window from the given corner
    if ll:
        axis.set_xlim(sorted([np.exp(ll[0]), np.exp(ll[0]+width )]))
        axis.set_ylim(sorted([np.exp(ll[1]), np.exp(ll[1]+height)]))
    elif lr:
        axis.set_xlim(sorted([np.exp(lr[0]), np.exp(lr[0]-width )]))
        axis.set_ylim(sorted([np.exp(lr[1]), np.exp(lr[1]+height)]))
    elif tl:
        axis.set_xlim(sorted([np.exp(tl[0]), np.exp(tl[0]+width )]))
        axis.set_ylim(sorted([np.exp(tl[1]), np.exp(tl[1]-height)]))
    elif tr:
        axis.set_xlim(sorted([np.exp(tr[0]), np.exp(tr[0]-width )]))
        axis.set_ylim(sorted([np.exp(tr[1]), np.exp(tr[1]-height)]))

    # optionally plot the diagonal itself (in relative coordinates, corner to corner)
    if plot:
        if exp > 0: return plot_powerlaw(exp, 0., 0., 1., **kwargs)
        else : return plot_powerlaw(exp, 0., 1., 1., **kwargs)
# ==================================================================================================
def annotate_powerlaw(text, exp, startx, starty, width=None, rx=0.5, ry=0.5, **kwargs):
    r'''
    Add a label to the middle of a power-law annotation (see ``goosempl.plot_powerlaw``).

    :arguments:

      **text** (``str``)
        The label text.

      **exp** (``float``)
        The power-law exponent.

      **startx, starty** (``float``)
        Start coordinates.

    :options:

      **width, height, endx, endy** (``float``)
        Definition of the end coordinate (only one of these options is needed).

      **rx, ry** (``float``)
        Shift in x- and y-direction w.r.t. the default coordinates.

      **units** ([``'relative'``] | ``'absolute'``)
        The type of units in which the coordinates are specified. Relative coordinates correspond
        to a fraction of the relevant axis. If you use relative coordinates, be sure to set the
        limits and scale before calling this function!

      **axis** ([``plt.gca()``] | ...)
        Specify the axis to which to apply the limits.

      ...
        Any ``plt.text(...)`` option.

    :returns:

      The handle of the ``plt.text(...)`` command.
    '''

    # get options/defaults
    # (bug-fix: evaluate "plt.gca()" only when no axis is given; the old eager default touched,
    # or even created, the "current" figure even when an axis was supplied)
    endx = kwargs.pop('endx', None)
    endy = kwargs.pop('endy', None)
    height = kwargs.pop('height', None)
    units = kwargs.pop('units', 'relative')
    axis = kwargs.pop('axis', None)
    if axis is None: axis = plt.gca()

    # check: a power-law is only a straight line on a log-log scale
    if axis.get_xscale() != 'log' or axis.get_yscale() != 'log':
        raise IOError('This function only works on a log-log scale, where the power-law is a straight line')

    # apply width/height: translate to an end-coordinate
    if width is not None:
        endx = startx + width
        endy = None
    elif height is not None:
        if exp > 0: endy = starty + height
        elif exp == 0: endy = starty
        else: endy = starty - height
        endx = None

    # transform relative coordinates to absolute ones
    if units.lower() == 'relative':
        [startx, endx] = rel2abs_x([startx, endx], axis)
        [starty, endy] = rel2abs_y([starty, endy], axis)

    # determine the multiplication constant such that the line goes through the start point
    const = starty / (startx ** exp)

    # complete the missing end-coordinate
    if endx is not None: endy = const * endx ** exp
    else: endx = (endy / const) ** (1 / exp)

    # position of the label: a fraction (rx, ry) along the line, measured logarithmically
    x = 10. ** (np.log10(startx) + rx * (np.log10(endx) - np.log10(startx)))
    y = 10. ** (np.log10(starty) + ry * (np.log10(endy) - np.log10(starty)))

    # plot
    return axis.text(x, y, text, **kwargs)
# ==================================================================================================
def plot_powerlaw(exp, startx, starty, width=None, **kwargs):
    r'''
    Plot a power-law: a straight line on the (mandatory) log-log scale.

    :arguments:

      **exp** (``float``)
        The power-law exponent.

      **startx, starty** (``float``)
        Start coordinates.

    :options:

      **width, height, endx, endy** (``float``)
        Definition of the end coordinate (only one of these options is needed).

      **units** ([``'relative'``] | ``'absolute'``)
        The type of units in which the coordinates are specified. Relative coordinates correspond
        to a fraction of the relevant axis. If you use relative coordinates, be sure to set the
        limits and scale before calling this function!

      **axis** ([``plt.gca()``] | ...)
        Specify the axis to which to apply the limits.

      ...
        Any ``plt.plot(...)`` option.

    :returns:

      The handle of the ``plt.plot(...)`` command.
    '''

    # get options/defaults
    # (bug-fix: evaluate "plt.gca()" only when no axis is given; the old eager default touched,
    # or even created, the "current" figure even when an axis was supplied)
    endx = kwargs.pop('endx', None)
    endy = kwargs.pop('endy', None)
    height = kwargs.pop('height', None)
    units = kwargs.pop('units', 'relative')
    axis = kwargs.pop('axis', None)
    if axis is None: axis = plt.gca()

    # check: a power-law is only a straight line on a log-log scale
    if axis.get_xscale() != 'log' or axis.get_yscale() != 'log':
        raise IOError('This function only works on a log-log scale, where the power-law is a straight line')

    # apply width/height: translate to an end-coordinate
    if width is not None:
        endx = startx + width
        endy = None
    elif height is not None:
        if exp > 0: endy = starty + height
        elif exp == 0: endy = starty
        else: endy = starty - height
        endx = None

    # transform relative coordinates to absolute ones
    if units.lower() == 'relative':
        [startx, endx] = rel2abs_x([startx, endx], axis)
        [starty, endy] = rel2abs_y([starty, endy], axis)

    # determine the multiplication constant such that the line goes through the start point
    const = starty / (startx ** exp)

    # complete the missing end-coordinate
    if endx is not None: endy = const * endx ** exp
    else: endx = (endy / const) ** (1 / exp)

    # plot
    return axis.plot([startx, endx], [starty, endy], **kwargs)
# ==================================================================================================
def grid_powerlaw(exp, insert=0, skip=0, end=-1, step=0, axis=None, **kwargs):
    r'''
    Draw a power-law grid: a grid that respects a certain power-law exponent. The grid-lines start from
    the positions of the ticks.

    :arguments:

        **exp** (``float``)
            The power-law exponent.

    :options:

        **insert** (``<int>``)
            Insert extra lines in between the default lines set by the tick positions.

        **skip, end, step** (``<int>``)
            Select from the lines based on ``coor = coor[skip:end:step]``.

        **axis** ([``plt.gca()``] | ...)
            Specify the axis to which to apply the limits.

        ...
            Any ``plt.plot(...)`` option.

    :returns:

        The handle of the ``plt.plot(...)`` command.
    '''
    # default axis
    if axis is None: axis = plt.gca()
    # default plot settings
    kwargs.setdefault('color'    , 'k' )
    kwargs.setdefault('linestyle', '--')
    kwargs.setdefault('linewidth', 1   )
    # check: a power-law is a straight line only on a log-log scale
    if axis.get_xscale() != 'log' or axis.get_yscale() != 'log':
        raise IOError('This function only works on a log-log scale, where the power-law is a straight line')
    # zero-exponent: draw horizontal lines
    if exp == 0:
        # y-coordinate of the start positions (as a fraction of the axis)
        starty = abs2rel_y(axis.get_yticks(), axis=axis)
        # insert extra coordinates (linear interpolation between the ticks)
        if insert > 0:
            n      = len(starty)
            x      = np.linspace(0, 1, n + (n - 1) * int(insert))
            xp     = np.linspace(0, 1, n)
            starty = np.interp(x, xp, starty)
        # skip coordinates
        starty = starty[int(skip):int(end):int(1 + step)]
        # set remaining coordinates: horizontal lines spanning the full axis width
        endy   = starty
        startx = np.zeros((len(starty)))
        endx   = np.ones ((len(starty)))
    # all other exponents
    else:
        # get the axis' size in real coordinates
        # - get the limits
        xmin, xmax = axis.get_xlim()
        ymin, ymax = axis.get_ylim()
        # - compute the size in both directions (in decades)
        deltax = np.log10(xmax) - np.log10(xmin)
        deltay = np.log10(ymax) - np.log10(ymin)
        # convert the exponent in real coordinates to an exponent in relative coordinates
        b = np.abs(exp) * deltax / deltay
        # x-coordinate of the start positions (as a fraction of the axis)
        startx = abs2rel_x(axis.get_xticks(), axis=axis)
        # compute how many labels need to be prepended:
        # lines starting left of the axis may still cross the visible window
        Dx   = startx[1] - startx[0]
        nneg = int(np.floor(1. / (b * Dx))) - 1
        # add extra to be sure
        if insert > 0:
            nneg += 1
        # prepend
        if nneg > 0:
            startx = np.hstack(( startx[0] + np.cumsum(-Dx * np.ones((nneg)))[::-1], startx ))
        # insert extra coordinates (linear interpolation between the ticks)
        if insert > 0:
            n      = len(startx)
            x      = np.linspace(0, 1, n + (n - 1) * int(insert))
            xp     = np.linspace(0, 1, n)
            startx = np.interp(x, xp, startx)
        # skip coordinates
        if step > 0:
            startx = startx[int(skip)::int(1 + step)]
        # x-coordinate of the end of the lines
        endx = startx + 1 / b
        # y-coordinate of the start and the end of the lines:
        # each line spans the full y-range; the direction follows the sign of "exp"
        if exp > 0:
            starty = np.zeros((len(startx)))
            endy   = np.ones ((len(startx)))
        else:
            starty = np.ones ((len(startx)))
            endy   = np.zeros((len(startx)))
    # convert to real coordinates
    startx = rel2abs_x(startx, axis)
    endx   = rel2abs_x(endx  , axis)
    starty = rel2abs_y(starty, axis)
    endy   = rel2abs_y(endy  , axis)
    # plot
    lines = axis.plot(np.vstack(( startx, endx )), np.vstack(( starty, endy )), **kwargs)
    # suppress excess legend labels: keep only the first line's label
    plt.setp(lines[1:], label="_")
    # return handles
    return lines
# ==================================================================================================
def histogram_bin_edges_minwidth(min_width, bins):
    r'''
    Merge bins with right-neighbour until each bin has a minimum width.

    :arguments:

        **bins** (``<array_like>``)
            The bin-edges.

        **min_width** (``<float>``)
            The minimum bin width.
    '''
    # nothing to enforce
    if min_width is None or min_width is False:
        return bins

    # repeatedly merge the first too-narrow bin with its right neighbour
    while True:
        narrow = np.where(np.diff(bins) < min_width)[0]
        if len(narrow) == 0:
            return bins
        i = narrow[0]
        if i + 1 == len(bins) - 1:
            # the narrow bin is the last one: merge it into its left neighbour instead
            bins = np.hstack(( bins[:(i)], bins[-1] ))
        else:
            # drop the right edge of the narrow bin
            bins = np.hstack(( bins[:(i + 1)], bins[(i + 2):] ))
# ==================================================================================================
def histogram_bin_edges_mincount(data, min_count, bins):
    r'''
    Merge bins with right-neighbour until each bin has a minimum number of data-points.

    :arguments:

        **data** (``<array_like>``)
            Input data. The histogram is computed over the flattened array.

        **bins** (``<array_like>`` | ``<int>``)
            The bin-edges (or the number of bins, automatically converted to equal-sized bins).

        **min_count** (``<int>``)
            The minimum number of data-points per bin.
    '''
    # nothing to enforce
    if min_count is None or min_count is False:
        return bins

    # check
    if type(min_count) != int:
        raise IOError('"min_count" must be an integer number')

    # repeatedly merge the first too-sparse bin with its right neighbour
    while True:
        count, _ = np.histogram(data, bins=bins, density=False)
        sparse = np.where(count < min_count)[0]
        if len(sparse) == 0:
            return bins
        i = sparse[0]
        if i + 1 == len(count):
            # the sparse bin is the last one: merge it into its left neighbour instead
            bins = np.hstack(( bins[:(i)], bins[-1] ))
        else:
            # drop the right edge of the sparse bin
            bins = np.hstack(( bins[:(i + 1)], bins[(i + 2):] ))
# ==================================================================================================
def histogram_bin_edges(data, bins=10, mode='equal', min_count=None, integer=False, remove_empty_edges=True, min_width=None):
    r'''
    Determine bin-edges.

    :arguments:

        **data** (``<array_like>``)
            Input data. The histogram is computed over the flattened array.

    :options:

        **bins** ([``10``] | ``<int>``)
            The number of bins.

        **mode** ([``'equal'``] | ``<str>``)
            Mode with which to compute the bin-edges:
            * ``'equal'``: each bin has equal width.
            * ``'log'``: logarithmic spacing.
            * ``'uniform'``: uniform number of data-points per bin.

        **min_count** (``<int>``)
            The minimum number of data-points per bin.

        **min_width** (``<float>``)
            The minimum width of each bin.

        **integer** ([``False``] | ``True``)
            If ``True``, bins not encompassing an integer are removed
            (e.g. a bin with edges ``[1.1, 1.9]`` is removed, but ``[0.9, 1.1]`` is not removed).

        **remove_empty_edges** ([``True``] | ``False``)
            Remove empty bins at the beginning or the end.

    :returns:

        **bin_edges** (``<array of dtype float>``)
            The edges to pass into histogram.
    '''
    # determine the bin-edges
    if mode == 'equal':
        bin_edges = np.linspace(np.min(data), np.max(data), bins + 1)
    elif mode == 'log':
        bin_edges = np.logspace(np.log10(np.min(data)), np.log10(np.max(data)), bins + 1)
    elif mode == 'uniform':
        # - check
        if hasattr(bins, "__len__"):
            raise IOError('Only the number of bins can be specified')
        # - use the minimum count to estimate the number of bins
        if min_count is not None and min_count is not False:
            if type(min_count) != int:
                raise IOError('"min_count" must be an integer number')
            bins = int(np.floor(float(len(data)) / float(min_count)))
        # - number of data-points in each bin (equal for each)
        count = int(np.floor(float(len(data)) / float(bins))) * np.ones(bins, dtype='int')
        # - increase the number of data-points by one in as many bins as needed,
        #   such that the total fits the total number of data-points
        # BUG FIX: use the builtin "int"; the "np.int" alias was deprecated in NumPy 1.20
        # and removed in NumPy 1.24, so ".astype(np.int)" raises AttributeError there
        count[np.linspace(0, bins - 1, len(data) - np.sum(count)).astype(int)] += 1
        # - split the data
        idx     = np.empty((bins + 1), dtype='int')
        idx[0 ] = 0
        idx[1:] = np.cumsum(count)
        idx[-1] = len(data) - 1
        # - determine the bin-edges
        bin_edges = np.unique(np.sort(data)[idx])
    else:
        raise IOError('Unknown option')
    # remove empty starting and ending bin (related to an unfortunate choice of bin-edges)
    if remove_empty_edges:
        N, _ = np.histogram(data, bins=bin_edges, density=False)
        idx  = np.min(np.where(N > 0)[0])
        jdx  = np.max(np.where(N > 0)[0])
        bin_edges = bin_edges[(idx):(jdx + 2)]
    # merge bins with too few data-points (if needed)
    bin_edges = histogram_bin_edges_mincount(data, min_count=min_count, bins=bin_edges)
    # merge bins that have too small of a width
    bin_edges = histogram_bin_edges_minwidth(min_width=min_width, bins=bin_edges)
    # select only bins that encompass an integer (and retain the original bounds)
    if integer:
        idx = np.where(np.diff(np.floor(bin_edges)) >= 1)[0]
        idx = np.unique(np.hstack((0, idx, len(bin_edges) - 1)))
        bin_edges = bin_edges[idx]
    # return
    return bin_edges
# ==================================================================================================
def histogram(data, return_edges=True, **kwargs):
    r'''
    Compute histogram.
    See `numpy.histrogram <https://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html>`_

    :extra options:

        **return_edges** ([``True``] | [``False``])
            Return the bin edges if set to ``True``, return their midpoints otherwise.
    '''
    # defer the actual work to NumPy
    counts, edges = np.histogram(data, **kwargs)

    if return_edges:
        return counts, edges

    # convert bin-edges to the mid-point of each bin
    midpoints = np.diff(edges) / 2. + edges[:-1]
    return counts, midpoints
# ==================================================================================================
def histogram_cumulative(data, **kwargs):
    r'''
    Compute cumulative histogram.
    See `numpy.histrogram <https://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html>`_

    :extra options:

        **return_edges** ([``True``] | [``False``])
            Return the bin edges if set to ``True``, return their midpoints otherwise.

        **normalize** ([``False``] | ``True``)
            Normalize such that the final probability is one. In this case the function returns the (binned)
            cumulative probability density.
    '''
    # extract the extra options before forwarding to NumPy
    return_edges = kwargs.pop('return_edges', True)
    normalize = kwargs.pop('normalize', False)

    counts, edges = np.histogram(data, **kwargs)

    # accumulate; optionally scale such that the final value is one
    cum = np.cumsum(counts)
    if normalize:
        cum = cum / cum[-1]

    # optionally convert bin-edges to bin mid-points
    if not return_edges:
        edges = np.diff(edges) / 2. + edges[:-1]

    return cum, edges
# ==================================================================================================
def hist(P, edges, **kwargs):
    r'''
    Plot histogram: one rectangular patch per bin.

    :arguments:

        **P** (``<array_like>``)
            Bin counts / probabilities (one value per bin).

        **edges** (``<array_like>``)
            Bin-edges (one more entry than ``P``).

    :options:

        **axis** ([``plt.gca()``] | ...)
            Specify the axis to plot in.

        **cindex** (``<numpy.ndarray>``)
            Array with, for each bar, the value that is indexed to a color.

        **autoscale** ([``True``] | ``False``)
            Automatically update the limits of the plot.

        ...
            Any ``matplotlib.collections.PatchCollection`` option.

    :returns:

        The handle of the ``PatchCollection``.
    '''
    from matplotlib.collections import PatchCollection
    from matplotlib.patches import Polygon
    # extract local options
    axis      = kwargs.pop( 'axis'      , plt.gca() )
    cindex    = kwargs.pop( 'cindex'    , None      )
    autoscale = kwargs.pop( 'autoscale' , True      )
    # set defaults
    kwargs.setdefault('edgecolor','k')
    # no color-index -> set transparent
    if cindex is None:
        kwargs.setdefault('facecolor',(0.,0.,0.,0.))
    # convert -> list of Polygons: one rectangle per bin, from the baseline (y=0) to P
    poly = []
    for p, xl, xu in zip(P, edges[:-1], edges[1:]):
        coor = np.array([
            [xl, 0.],
            [xu, 0.],
            [xu, p ],
            [xl, p ],
        ])
        poly.append(Polygon(coor))
    args = (poly)  # NOTE(review): "(poly)" is just the list, not a tuple; PatchCollection accepts it
    # convert patches -> matplotlib-objects
    p = PatchCollection(args,**kwargs)
    # add colors to patches
    if cindex is not None:
        p.set_array(cindex)
    # add patches to axis
    axis.add_collection(p)
    # rescale the axes manually ("add_collection" does not trigger autoscaling)
    if autoscale:
        # - get limits
        xlim = [ edges[0], edges[-1] ]
        ylim = [ 0       , np.max(P) ]
        # - set limits +/- 10% extra margin
        axis.set_xlim([xlim[0]-.1*(xlim[1]-xlim[0]),xlim[1]+.1*(xlim[1]-xlim[0])])
        axis.set_ylim([ylim[0]-.1*(ylim[1]-ylim[0]),ylim[1]+.1*(ylim[1]-ylim[0])])
    return p
# ==================================================================================================
def cdf(data, mode='continuous', **kwargs):
    '''
    Return cumulative density.

    :arguments:

        **data** (``<numpy.ndarray>``)
            Input data, to plot the distribution for.

    :returns:

        **P** (``<numpy.ndarray>``)
            Cumulative probability.

        **x** (``<numpy.ndarray>``)
            Data points.
    '''
    # NOTE: "mode" and any extra options are currently unused (kept for interface stability)
    x = np.sort(data)
    P = np.linspace(0.0, 1.0, len(data))
    return (P, x)
# ==================================================================================================
def patch(*args,**kwargs):
    '''
    Add patches to plot. The color of the patches is indexed according to a specified color-index.

    :example:

        Plot a finite element mesh: the outline of the undeformed configuration, and the deformed
        configuration for which the elements get a color e.g. based on stress::

            import matplotlib.pyplot as plt
            import goosempl as gplt

            fig,ax = plt.subplots()

            p = gplt.patch(coor=coor+disp,conn=conn,axis=ax,cindex=stress,cmap='YlOrRd',edgecolor=None)
            _ = gplt.patch(coor=coor     ,conn=conn,axis=ax)

            cbar = fig.colorbar(p,axis=ax,aspect=10)

            plt.show()

    :arguments - option 1/2:

        **patches** (``<list>``)
            List with patch objects. Can be replaced by specifying ``coor`` and ``conn``.

    :arguments - option 2/2:

        **coor** (``<numpy.ndarray>`` | ``<list>`` (nested))
            Matrix with on each row the coordinates (positions) of each node.

        **conn** (``<numpy.ndarray>`` | ``<list>`` (nested))
            Matrix with on each row the number numbers (rows in ``coor``) which form an element (patch).

    :options:

        **cindex** (``<numpy.ndarray>``)
            Array with, for each patch, the value that should be indexed to a color.

        **axis** (``<matplotlib>``)
            Specify an axis to include to plot in. By default the current axis is used.

        **autoscale** ([``True``] | ``False``)
            Automatically update the limits of the plot (currently automatic limits of Collections are not
            supported by matplotlib).

    :recommended options:

        **cmap** (``<str>`` | ...)
            Specify a colormap.

        **linewidth** (``<float>``)
            Width of the edges.

        **edgecolor** (``<str>`` | ...)
            Color of the edges.

        **clim** (``(<float>,<float>)``)
            Lower and upper limit of the color-axis.

    :returns:

        **handle** (``<matplotlib>``)
            Handle of the patch objects.

    .. seealso::

        * `matplotlib example
          <http://matplotlib.org/examples/api/patch_collection.html>`_.
    '''
    from matplotlib.collections import PatchCollection
    from matplotlib.patches import Polygon
    # check dependent options
    # NOTE(review): this guard makes "option 1" (pre-built patches) unreachable;
    # kept as-is to preserve the existing interface
    if ( 'coor' not in kwargs or 'conn' not in kwargs ):
        raise IOError('Specify both "coor" and "conn"')
    # extract local options
    axis      = kwargs.pop( 'axis'      , plt.gca() )
    cindex    = kwargs.pop( 'cindex'    , None      )
    coor      = kwargs.pop( 'coor'      , None      )
    conn      = kwargs.pop( 'conn'      , None      )
    autoscale = kwargs.pop( 'autoscale' , True      )
    # set defaults
    kwargs.setdefault('edgecolor','k')
    # no color-index -> set transparent
    if cindex is None:
        kwargs.setdefault('facecolor',(0.,0.,0.,0.))
    # convert mesh -> list of Polygons
    if coor is not None and conn is not None:
        poly = []
        for iconn in conn:
            poly.append(Polygon(coor[iconn,:]))
        # BUG FIX: was "tuple(poly, *args)", which raises TypeError as soon as positional
        # arguments are present (tuple() takes at most one argument); the polygons built
        # from the mesh are the patches to draw
        args = tuple(poly)
    # convert patches -> matplotlib-objects
    p = PatchCollection(args,**kwargs)
    # add colors to patches
    if cindex is not None:
        p.set_array(cindex)
    # add patches to axis
    axis.add_collection(p)
    # rescale the axes manually
    if autoscale:
        # - get limits
        xlim = [ np.min(coor[:,0]) , np.max(coor[:,0]) ]
        ylim = [ np.min(coor[:,1]) , np.max(coor[:,1]) ]
        # - set limits +/- 10% extra margin
        axis.set_xlim([xlim[0]-.1*(xlim[1]-xlim[0]),xlim[1]+.1*(xlim[1]-xlim[0])])
        axis.set_ylim([ylim[0]-.1*(ylim[1]-ylim[0]),ylim[1]+.1*(ylim[1]-ylim[0])])
    return p
# ==================================================================================================
|
def abs2rel_y(y, axis=None):
    r'''
    Transform absolute y-coordinates to relative y-coordinates. Relative coordinates correspond to a
    fraction of the relevant axis. Be sure to set the limits and scale before calling this function!

    :arguments:

        **y** (``float``, ``list``)
            Absolute coordinates.

    :options:

        **axis** ([``plt.gca()``] | ...)
            Specify the axis to which to apply the limits.

    :returns:

        **y** (``float``, ``list``)
            Relative coordinates.
    '''
    # get current axis
    if axis is None:
        axis = plt.gca()

    # get current limits
    ymin, ymax = axis.get_ylim()

    # transform
    # - log scale: interpolate between the logarithms of the limits
    # BUG FIX: consult the y-scale (was: "get_xscale"); this function transforms y-coordinates
    if axis.get_yscale() == 'log':
        try:
            return [(np.log10(i) - np.log10(ymin)) / (np.log10(ymax) - np.log10(ymin)) if i is not None else i for i in y]
        except TypeError:
            # "y" is a scalar, not iterable
            return (np.log10(y) - np.log10(ymin)) / (np.log10(ymax) - np.log10(ymin))
    # - linear scale
    else:
        try:
            return [(i - ymin) / (ymax - ymin) if i is not None else i for i in y]
        except TypeError:
            return (y - ymin) / (ymax - ymin)
Transform absolute y-coordinates to relative y-coordinates. Relative coordinates correspond to a
fraction of the relevant axis. Be sure to set the limits and scale before calling this function!
:arguments:
**y** (``float``, ``list``)
Absolute coordinates.
:options:
**axis** ([``plt.gca()``] | ...)
Specify the axis to which to apply the limits.
:returns:
**y** (``float``, ``list``)
Relative coordinates. | train | https://github.com/tdegeus/GooseMPL/blob/16e1e06cbcf7131ac98c03ca7251ce83734ef905/GooseMPL/__init__.py#L247-L283 | null | '''
This module provides some extensions to matplotlib.
:dependencies:
* numpy
* matplotlib
:copyright:
| Tom de Geus
| tom@geus.me
| http://www.geus.me
'''
# ==================================================================================================
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
import os,re,sys
# ==================================================================================================
def find_latex_font_serif():
    r'''
    Find an available font to mimic LaTeX, and return its name.
    '''
    import os, re
    import matplotlib.font_manager

    def stem(path):
        # font name = file name without extension, before any ' - ' suffix
        return os.path.splitext(os.path.split(path)[-1])[0].split(' - ')[0]

    fonts = matplotlib.font_manager.findSystemFonts(fontpaths=None, fontext='ttf')

    # patterns in decreasing order of preference
    preferences = [
        r'.*Computer\ Modern\ Roman.*',
        r'.*CMU\ Serif.*',
        r'.*CMU.*',
        r'.*Times.*',
        r'.*DejaVu.*',
        r'.*Serif.*',
    ]

    # return the first font matching the most-preferred pattern
    for pattern in preferences:
        for font in fonts:
            if re.match(pattern, font):
                return stem(font)

    return None
# --------------------------------------------------------------------------------------------------
def copy_style():
    r'''
    Write all goose-styles to the relevant matplotlib configuration directory.
    '''
    import os
    import matplotlib

    # style definitions
    # -----------------

    # map file-name -> style-file content (written verbatim)
    styles = {}

    styles['goose.mplstyle'] = '''
figure.figsize : 8,6
font.weight : normal
font.size : 16
axes.labelsize : medium
axes.titlesize : medium
xtick.labelsize : small
ytick.labelsize : small
xtick.top : True
ytick.right : True
axes.facecolor : none
axes.prop_cycle : cycler('color',['k', 'r', 'g', 'b', 'y', 'c', 'm'])
legend.fontsize : medium
legend.fancybox : true
legend.columnspacing : 1.0
legend.handletextpad : 0.2
lines.linewidth : 2
image.cmap : afmhot
image.interpolation : nearest
image.origin : lower
savefig.facecolor : none
figure.autolayout : True
errorbar.capsize : 2
'''

    styles['goose-tick-in.mplstyle'] = '''
xtick.direction : in
ytick.direction : in
'''

    styles['goose-tick-lower.mplstyle'] = '''
xtick.top : False
ytick.right : False
axes.spines.top : False
axes.spines.right : False
'''

    if find_latex_font_serif() is not None:
        # the "{{...}}" escapes collapse to "{...}" when ".format" is applied
        styles['goose-latex.mplstyle'] = r'''
font.family : serif
font.serif : {serif:s}
font.weight : bold
font.size : 18
text.usetex : true
text.latex.preamble : \usepackage{{amsmath}},\usepackage{{amsfonts}},\usepackage{{amssymb}},\usepackage{{bm}}
'''.format(serif=find_latex_font_serif())
    else:
        # BUG FIX: this branch is not passed through ".format", so the braces must be
        # single (the old "{{...}}" was written literally to the style file)
        styles['goose-latex.mplstyle'] = r'''
font.family : serif
font.weight : bold
font.size : 18
text.usetex : true
text.latex.preamble : \usepackage{amsmath},\usepackage{amsfonts},\usepackage{amssymb},\usepackage{bm}
'''

    # write style definitions
    # -----------------------

    # directory name where the styles are stored
    dirname = os.path.abspath(os.path.join(matplotlib.get_configdir(), 'stylelib'))

    # make directory if it does not yet exist
    if not os.path.isdir(dirname): os.makedirs(dirname)

    # write all styles (close each file handle deterministically)
    for fname, style in styles.items():
        with open(os.path.join(dirname, fname), 'w') as f:
            f.write(style)
# ==================================================================================================
def set_decade_lims(axis=None, direction=None):
    r'''
    Set limits to the floor/ceil values in terms of decades.

    :options:

        **axis** ([``plt.gca()``] | ...)
            Specify the axis to which to apply the limits.

        **direction** ([``None``] | ``'x'`` | ``'y'``)
            Limit the application to a certain direction (default: both).
    '''
    # get current axis
    if axis is None:
        axis = plt.gca()

    def snap(lo, hi):
        # round the limits outward to whole decades
        return 10 ** (np.floor(np.log10(lo))), 10 ** (np.ceil(np.log10(hi)))

    # x-axis
    if direction in (None, 'x'):
        lo, hi = snap(*axis.get_xlim())
        axis.set_xlim([lo, hi])

    # y-axis
    if direction in (None, 'y'):
        lo, hi = snap(*axis.get_ylim())
        axis.set_ylim([lo, hi])
# ==================================================================================================
def scale_lim(lim, factor=1.05):
    r'''
    Scale limits to be 5% wider, to have a nice plot.

    :arguments:

        **lim** (``<list>`` | ``<str>``)
            The limits. May be a string "[...,...]", which is converted to a list.

    :options:

        **factor** ([``1.05``] | ``<float>``)
            Scale factor.
    '''
    # convert a string "[...,...]" to a list
    # NOTE(review): eval executes arbitrary code; only call this with trusted input
    if type(lim) == str:
        lim = eval(lim)

    # widen both ends by half of the extra margin (the input list is modified in place)
    extra = (factor - 1.) / 2. * (lim[1] - lim[0])
    lim[0] -= extra
    lim[1] += extra

    return lim
# ==================================================================================================
def abs2rel_x(x, axis=None):
    r'''
    Transform absolute x-coordinates to relative x-coordinates. Relative coordinates correspond to a
    fraction of the relevant axis. Be sure to set the limits and scale before calling this function!

    :arguments:

        **x** (``float``, ``list``)
            Absolute coordinates.

    :options:

        **axis** ([``plt.gca()``] | ...)
            Specify the axis to which to apply the limits.

    :returns:

        **x** (``float``, ``list``)
            Relative coordinates.
    '''
    # get current axis
    if axis is None:
        axis = plt.gca()

    # get current limits
    xmin, xmax = axis.get_xlim()

    # transform
    # - log scale: interpolate between the logarithms of the limits
    # BUG FIX: catch only TypeError (scalar "x" is not iterable); the old bare "except:"
    # also swallowed KeyboardInterrupt/SystemExit and hid genuine errors
    if axis.get_xscale() == 'log':
        try:
            return [(np.log10(i) - np.log10(xmin)) / (np.log10(xmax) - np.log10(xmin)) if i is not None else i for i in x]
        except TypeError:
            return (np.log10(x) - np.log10(xmin)) / (np.log10(xmax) - np.log10(xmin))
    # - linear scale
    else:
        try:
            return [(i - xmin) / (xmax - xmin) if i is not None else i for i in x]
        except TypeError:
            return (x - xmin) / (xmax - xmin)
# ==================================================================================================
# ==================================================================================================
def rel2abs_x(x, axis=None):
    r'''
    Transform relative x-coordinates to absolute x-coordinates. Relative coordinates correspond to a
    fraction of the relevant axis. Be sure to set the limits and scale before calling this function!

    :arguments:

        **x** (``float``, ``list``)
            Relative coordinates.

    :options:

        **axis** ([``plt.gca()``] | ...)
            Specify the axis to which to apply the limits.

    :returns:

        **x** (``float``, ``list``)
            Absolute coordinates.
    '''
    # get current axis
    if axis is None:
        axis = plt.gca()

    # get current limits
    xmin, xmax = axis.get_xlim()

    # transform
    # - log scale: interpolate between the logarithms of the limits
    # BUG FIX: catch only TypeError (scalar "x" is not iterable); the old bare "except:"
    # also swallowed KeyboardInterrupt/SystemExit and hid genuine errors
    if axis.get_xscale() == 'log':
        try:
            return [10. ** (np.log10(xmin) + i * (np.log10(xmax) - np.log10(xmin))) if i is not None else i for i in x]
        except TypeError:
            return 10. ** (np.log10(xmin) + x * (np.log10(xmax) - np.log10(xmin)))
    # - linear scale
    else:
        try:
            return [xmin + i * (xmax - xmin) if i is not None else i for i in x]
        except TypeError:
            return xmin + x * (xmax - xmin)
# ==================================================================================================
def rel2abs_y(y, axis=None):
    r'''
    Transform relative y-coordinates to absolute y-coordinates. Relative coordinates correspond to a
    fraction of the relevant axis. Be sure to set the limits and scale before calling this function!

    :arguments:

        **y** (``float``, ``list``)
            Relative coordinates.

    :options:

        **axis** ([``plt.gca()``] | ...)
            Specify the axis to which to apply the limits.

    :returns:

        **y** (``float``, ``list``)
            Absolute coordinates.
    '''
    # get current axis
    if axis is None:
        axis = plt.gca()

    # get current limits
    ymin, ymax = axis.get_ylim()

    # transform
    # - log scale: interpolate between the logarithms of the limits
    # BUG FIX: consult the y-scale (was: "get_xscale"); this function transforms y-coordinates,
    # so the x-scale is irrelevant. Also catch only TypeError (scalar "y" is not iterable)
    # instead of a bare "except:" that hid genuine errors.
    if axis.get_yscale() == 'log':
        try:
            return [10. ** (np.log10(ymin) + i * (np.log10(ymax) - np.log10(ymin))) if i is not None else i for i in y]
        except TypeError:
            return 10. ** (np.log10(ymin) + y * (np.log10(ymax) - np.log10(ymin)))
    # - linear scale
    else:
        try:
            return [ymin + i * (ymax - ymin) if i is not None else i for i in y]
        except TypeError:
            return ymin + y * (ymax - ymin)
# ==================================================================================================
def subplots(scale_x=None, scale_y=None, scale=None, **kwargs):
    r'''
    Run ``matplotlib.pyplot.subplots`` with ``figsize`` set to the correct multiple of the default.

    :additional options:

        **scale, scale_x, scale_y** (``<float>``)
            Scale the figure-size (along one of the dimensions).
    '''
    # an explicit figsize wins: forward everything untouched
    if 'figsize' in kwargs:
        return plt.subplots(**kwargs)

    # start from the default figure-size and apply the scale factors
    width, height = mpl.rcParams['figure.figsize']
    if scale is not None:
        width, height = width * scale, height * scale
    if scale_x is not None:
        width *= scale_x
    if scale_y is not None:
        height *= scale_y

    # grow the figure with the number of sub-plots
    nrows = kwargs.pop('nrows', 1)
    ncols = kwargs.pop('ncols', 1)

    return plt.subplots(nrows=nrows, ncols=ncols, figsize=(ncols * width, nrows * height), **kwargs)
# ==================================================================================================
def plot(x, y, units='absolute', axis=None, **kwargs):
    r'''
    Plot.

    :arguments:

        **x, y** (``list``)
            Coordinates.

    :options:

        **units** ([``'absolute'``] | ``'relative'``)
            The type of units in which the coordinates are specified. Relative coordinates correspond to a
            fraction of the relevant axis. If you use relative coordinates, be sure to set the limits and
            scale before calling this function!

        ...
            Any ``plt.plot(...)`` option.

    :returns:

        The handle of the ``plt.plot(...)`` command.
    '''
    ax = plt.gca() if axis is None else axis

    # relative coordinates are fractions of the axis: convert to data coordinates
    if units.lower() == 'relative':
        x, y = rel2abs_x(x, ax), rel2abs_y(y, ax)

    return ax.plot(x, y, **kwargs)
# ==================================================================================================
def text(x, y, text, units='absolute', axis=None, **kwargs):
    r'''
    Plot a text.

    :arguments:

        **x, y** (``float``)
            Coordinates.

        **text** (``str``)
            Text to plot.

    :options:

        **units** ([``'absolute'``] | ``'relative'``)
            The type of units in which the coordinates are specified. Relative coordinates correspond to a
            fraction of the relevant axis. If you use relative coordinates, be sure to set the limits and
            scale before calling this function!

        ...
            Any ``plt.text(...)`` option.

    :returns:

        The handle of the ``plt.text(...)`` command.
    '''
    ax = plt.gca() if axis is None else axis

    # relative coordinates are fractions of the axis: convert to data coordinates
    if units.lower() == 'relative':
        x, y = rel2abs_x(x, ax), rel2abs_y(y, ax)

    return ax.text(x, y, text, **kwargs)
# ==================================================================================================
def diagonal_powerlaw(exp, ll=None, lr=None, tl=None, tr=None, width=None, height=None, plot=False, **kwargs):
    r'''
    Set the limits such that a power-law with a certain exponent lies on the diagonal.

    :arguments:

        **exp** (``<float>``)
            The power-law exponent.

        **ll, lr, tl, tr** (``<list>``)
            Coordinates of the lower-left, or the lower-right, or the top-left, or the top-right corner.
            Exactly one corner must be specified.

        **width, height** (``<float>``)
            Width or the height. Exactly one of these must be specified.

    :options:

        **axis** ([``plt.gca()``] | ...)
            Specify the axis to which to apply the limits.

        **plot** ([``False``] | ``True``)
            Plot the diagonal.

        ...
            Any ``plt.plot(...)`` option.

    :returns:

        The handle of the ``plt.plot(...)`` command (if any).
    '''
    axis = kwargs.pop('axis', plt.gca())
    # exactly one of "width"/"height" may be truthy; work with logarithms below
    # NOTE(review): truthiness is used here, so 0 behaves like "not given"
    if width and not height: width = np.log(width )
    elif height and not width : height = np.log(height)
    else: raise IOError('Specify "width" or "height"')
    # exactly one corner may be truthy; convert it to logarithms as well
    if ll and not lr and not tl and not tr: ll = [ np.log(ll[0]), np.log(ll[1]) ]
    elif lr and not ll and not tl and not tr: lr = [ np.log(lr[0]), np.log(lr[1]) ]
    elif tl and not lr and not ll and not tr: tl = [ np.log(tl[0]), np.log(tl[1]) ]
    elif tr and not lr and not tl and not ll: tr = [ np.log(tr[0]), np.log(tr[1]) ]
    else: raise IOError('Specify "ll" or "lr" or "tl" or "tr"')
    axis.set_xscale('log')
    axis.set_yscale('log')
    # couple width and height through the exponent so the power-law spans the diagonal
    if width : height = width * np.abs(exp)
    elif height: width = height / np.abs(exp)
    # set the limits starting from the specified corner (back in linear units via exp)
    if ll:
        axis.set_xlim(sorted([np.exp(ll[0]), np.exp(ll[0]+width )]))
        axis.set_ylim(sorted([np.exp(ll[1]), np.exp(ll[1]+height)]))
    elif lr:
        axis.set_xlim(sorted([np.exp(lr[0]), np.exp(lr[0]-width )]))
        axis.set_ylim(sorted([np.exp(lr[1]), np.exp(lr[1]+height)]))
    elif tl:
        axis.set_xlim(sorted([np.exp(tl[0]), np.exp(tl[0]+width )]))
        axis.set_ylim(sorted([np.exp(tl[1]), np.exp(tl[1]-height)]))
    elif tr:
        axis.set_xlim(sorted([np.exp(tr[0]), np.exp(tr[0]-width )]))
        axis.set_ylim(sorted([np.exp(tr[1]), np.exp(tr[1]-height)]))
    # optionally draw the diagonal itself (in relative coordinates)
    if plot:
        if exp > 0: return plot_powerlaw(exp, 0., 0., 1., **kwargs)
        else : return plot_powerlaw(exp, 0., 1., 1., **kwargs)
# ==================================================================================================
def annotate_powerlaw(text, exp, startx, starty, width=None, rx=0.5, ry=0.5, **kwargs):
    r'''
    Add a label to the middle of a power-law annotation (see ``goosempl.plot_powerlaw``).

    :arguments:

        **text** (``str``)
            The label text.

        **exp** (``float``)
            The power-law exponent.

        **startx, starty** (``float``)
            Start coordinates.

    :options:

        **width, height, endx, endy** (``float``)
            Definition of the end coordinate (only one of these options is needed).

        **rx, ry** (``float``)
            Shift in x- and y-direction w.r.t. the default coordinates.

        **units** ([``'relative'``] | ``'absolute'``)
            The type of units in which the coordinates are specified. Relative coordinates correspond to a
            fraction of the relevant axis. If you use relative coordinates, be sure to set the limits and
            scale before calling this function!

        **axis** ([``plt.gca()``] | ...)
            Specify the axis to which to apply the limits.

        ...
            Any ``plt.text(...)`` option.

    :returns:

        The handle of the ``plt.text(...)`` command.
    '''
    # get options/defaults
    endx   = kwargs.pop('endx'  , None      )
    endy   = kwargs.pop('endy'  , None      )
    height = kwargs.pop('height', None      )
    units  = kwargs.pop('units' , 'relative')
    axis   = kwargs.pop('axis'  , plt.gca() )
    # check: a power-law is a straight line only on a log-log scale
    if axis.get_xscale() != 'log' or axis.get_yscale() != 'log':
        raise IOError('This function only works on a log-log scale, where the power-law is a straight line')
    # apply width/height: reduce to a single end coordinate along one axis
    if width is not None:
        endx = startx + width
        endy = None
    elif height is not None:
        # "height" is a positive distance; the direction follows the sign of the exponent
        if exp > 0: endy = starty + height
        elif exp == 0: endy = starty
        else : endy = starty - height
        endx = None
    # transform relative (axis-fraction) -> absolute (data) coordinates
    if units.lower() == 'relative':
        [startx, endx] = rel2abs_x([startx, endx], axis)
        [starty, endy] = rel2abs_y([starty, endy], axis)
    # determine multiplication constant: y = const * x ** exp through the start point
    const = starty / ( startx**exp )
    # get the missing end x/y-coordinate from the power-law
    if endx is not None: endy = const * endx**exp
    else : endx = ( endy / const )**( 1/exp )
    # label position: fraction (rx, ry) along the line, measured logarithmically
    x = 10. ** ( np.log10(startx) + rx * ( np.log10(endx) - np.log10(startx) ) )
    y = 10. ** ( np.log10(starty) + ry * ( np.log10(endy) - np.log10(starty) ) )
    # plot
    return axis.text(x, y, text, **kwargs)
# ==================================================================================================
def plot_powerlaw(exp, startx, starty, width=None, **kwargs):
    r'''
    Plot a power-law: a straight line on the (mandatory) log-log axes.

    :arguments:

        **exp** (``float``)
            The power-law exponent.

        **startx, starty** (``float``)
            Start coordinates.

    :options:

        **width, height, endx, endy** (``float``)
            Definition of the end coordinate (only one of these options is needed).

        **units** ([``'relative'``] | ``'absolute'``)
            The type of units in which the coordinates are specified. Relative coordinates correspond to a
            fraction of the relevant axis. If you use relative coordinates, be sure to set the limits and
            scale before calling this function!

        **axis** ([``plt.gca()``] | ...)
            Specify the axis to which to apply the limits.

        ...
            Any ``plt.plot(...)`` option.

    :returns:

        The handle of the ``plt.plot(...)`` command.
    '''
    # get options/defaults
    endx   = kwargs.pop('endx'  , None      )
    endy   = kwargs.pop('endy'  , None      )
    height = kwargs.pop('height', None      )
    units  = kwargs.pop('units' , 'relative')
    axis   = kwargs.pop('axis'  , plt.gca() )
    # check: a power-law is a straight line only on a log-log scale
    if axis.get_xscale() != 'log' or axis.get_yscale() != 'log':
        raise IOError('This function only works on a log-log scale, where the power-law is a straight line')
    # apply width/height: reduce to a single end coordinate along one axis
    if width is not None:
        endx = startx + width
        endy = None
    elif height is not None:
        # "height" is a positive distance; the direction follows the sign of the exponent
        if exp > 0: endy = starty + height
        elif exp == 0: endy = starty
        else : endy = starty - height
        endx = None
    # transform relative (axis-fraction) -> absolute (data) coordinates
    if units.lower() == 'relative':
        [startx, endx] = rel2abs_x([startx, endx], axis)
        [starty, endy] = rel2abs_y([starty, endy], axis)
    # determine multiplication constant: y = const * x ** exp through the start point
    const = starty / ( startx**exp )
    # get the missing end x/y-coordinate from the power-law
    if endx is not None: endy = const * endx**exp
    else : endx = ( endy / const )**( 1/exp )
    # plot
    return axis.plot([startx, endx], [starty, endy], **kwargs)
# ==================================================================================================
def grid_powerlaw(exp, insert=0, skip=0, end=-1, step=0, axis=None, **kwargs):
    r'''
Draw a power-law grid: a grid that respects a certain power-law exponent. The grid-lines start from
the positions of the ticks.

:arguments:

  **exp** (``float``)
    The power-law exponent.

:options:

  **insert** (``<int>``)
    Insert extra lines in between the default lines set by the tick positions.

  **skip, end, step** (``<int>``)
    Select from the lines based on ``coor = coor[skip:end:step]``.

  **axis** ([``plt.gca()``] | ...)
    Specify the axis to which to apply the limits.

  ...
    Any ``plt.plot(...)`` option.

:returns:

  The handle of the ``plt.plot(...)`` command.
    '''

    # default axis
    if axis is None: axis = plt.gca()

    # default plot settings (thin dashed black lines)
    kwargs.setdefault('color'    , 'k' )
    kwargs.setdefault('linestyle', '--')
    kwargs.setdefault('linewidth', 1   )

    # check: a power-law is a straight line only on a double-logarithmic scale
    if axis.get_xscale() != 'log' or axis.get_yscale() != 'log':
        raise IOError('This function only works on a log-log scale, where the power-law is a straight line')

    # zero-exponent: draw horizontal lines
    if exp == 0:

        # y-coordinate of the start positions, in relative [0,1] axis coordinates
        starty = abs2rel_y(axis.get_yticks(), axis=axis)

        # insert extra coordinates, by linear interpolation between the tick positions
        if insert > 0:
            n      = len(starty)
            x      = np.linspace(0,1,n+(n-1)*int(insert))
            xp     = np.linspace(0,1,n)
            starty = np.interp(x, xp, starty)

        # skip coordinates
        starty = starty[int(skip):int(end):int(1+step)]

        # set remaining coordinates: horizontal lines spanning the full axis width
        endy   = starty
        startx = np.zeros((len(starty)))
        endx   = np.ones ((len(starty)))

    # all other exponents
    else:

        # get the axis' size in real coordinates
        # - get the limits
        xmin, xmax = axis.get_xlim()
        ymin, ymax = axis.get_ylim()
        # - compute the size in both directions (in decades, as the scale is log-log)
        deltax = np.log10(xmax) - np.log10(xmin)
        deltay = np.log10(ymax) - np.log10(ymin)

        # convert the exponent in real coordinates to an exponent in relative coordinates
        b = np.abs(exp) * deltax / deltay

        # x-coordinate of the start positions, in relative [0,1] axis coordinates
        startx = abs2rel_x(axis.get_xticks(), axis=axis)

        # compute how many labels need to be prepended: lines starting left of the axis
        # can still cross the visible window (a line of slope "b" spans "1/b" in x)
        Dx   = startx[1] - startx[0]
        nneg = int(np.floor(1./(b*Dx))) - 1

        # add extra to be sure
        if insert > 0:
            nneg += 1

        # prepend start positions left of the axis, spaced by the tick spacing "Dx"
        if nneg > 0:
            startx = np.hstack(( startx[0]+np.cumsum(-Dx * np.ones((nneg)))[::-1], startx ))

        # insert extra coordinates, by linear interpolation between the tick positions
        if insert > 0:
            n      = len(startx)
            x      = np.linspace(0,1,n+(n-1)*int(insert))
            xp     = np.linspace(0,1,n)
            startx = np.interp(x, xp, startx)

        # skip coordinates (NOTE(review): "end" is not applied in this branch — confirm intended)
        if step > 0:
            startx = startx[int(skip)::int(1+step)]

        # x-coordinate of the end of the lines
        endx = startx + 1/b

        # y-coordinate of the start and the end of the lines: each line spans the full height,
        # going up for positive exponents and down for negative ones
        if exp > 0:
            starty = np.zeros((len(startx)))
            endy   = np.ones ((len(startx)))
        else:
            starty = np.ones ((len(startx)))
            endy   = np.zeros((len(startx)))

    # convert to real coordinates
    startx = rel2abs_x(startx, axis)
    endx   = rel2abs_x(endx  , axis)
    starty = rel2abs_y(starty, axis)
    endy   = rel2abs_y(endy  , axis)

    # plot: one column per grid-line
    lines = axis.plot(np.vstack(( startx, endx )), np.vstack(( starty, endy )), **kwargs)

    # remove excess labels: keep only one legend entry for the whole grid
    plt.setp(lines[1:], label="_")

    # return handles
    return lines
# ==================================================================================================
def histogram_bin_edges_minwidth(min_width, bins):
    r'''
Merge bins with right-neighbour until each bin has a minimum width.

:arguments:

  **bins** (``<array_like>``)
    The bin-edges.

  **min_width** (``<float>``)
    The minimum bin width.
    '''

    # nothing to enforce
    if min_width is None or min_width is False:
        return bins

    while True:

        # locate bins that are narrower than the minimum width
        narrow = np.where(np.diff(bins) < min_width)[0]

        # all bins wide enough: done
        if len(narrow) == 0:
            return bins

        # merge the first too-narrow bin with its right neighbour
        # (for the last bin this means merging with the left neighbour instead)
        i = narrow[0]

        if i + 1 == len(bins) - 1:
            bins = np.hstack((bins[:i], bins[-1]))
        else:
            bins = np.hstack((bins[:i+1], bins[i+2:]))
# ==================================================================================================
def histogram_bin_edges_mincount(data, min_count, bins):
    r'''
Merge bins with right-neighbour until each bin has a minimum number of data-points.

:arguments:

  **data** (``<array_like>``)
    Input data. The histogram is computed over the flattened array.

  **bins** (``<array_like>`` | ``<int>``)
    The bin-edges (or the number of bins, automatically converted to equal-sized bins).

  **min_count** (``<int>``)
    The minimum number of data-points per bin.
    '''

    # nothing to enforce
    if min_count is None or min_count is False:
        return bins

    # check
    if type(min_count) != int: raise IOError('"min_count" must be an integer number')

    while True:

        # count the data-points per bin
        count, _ = np.histogram(data, bins=bins, density=False)

        # locate under-populated bins
        sparse = np.where(count < min_count)[0]

        # all bins populated enough: done
        if len(sparse) == 0:
            return bins

        # merge the first under-populated bin with its right neighbour
        # (for the last bin this means merging with the left neighbour instead)
        i = sparse[0]

        if i + 1 == len(count):
            bins = np.hstack((bins[:i], bins[-1]))
        else:
            bins = np.hstack((bins[:i+1], bins[i+2:]))
# ==================================================================================================
def histogram_bin_edges(data, bins=10, mode='equal', min_count=None, integer=False, remove_empty_edges=True, min_width=None):
    r'''
Determine bin-edges.

:arguments:

  **data** (``<array_like>``)
    Input data. The histogram is computed over the flattened array.

:options:

  **bins** ([``10``] | ``<int>``)
    The number of bins.

  **mode** ([``'equal'``] | ``<str>``)
    Mode with which to compute the bin-edges:
    * ``'equal'``: each bin has equal width.
    * ``'log'``: logarithmic spacing.
    * ``'uniform'``: uniform number of data-points per bin.

  **min_count** (``<int>``)
    The minimum number of data-points per bin.

  **min_width** (``<float>``)
    The minimum width of each bin.

  **integer** ([``False``] | ``True``)
    If ``True``, bins not encompassing an integer are removed
    (e.g. a bin with edges ``[1.1, 1.9]`` is removed, but ``[0.9, 1.1]`` is not removed).

  **remove_empty_edges** ([``True``] | ``False``)
    Remove empty bins at the beginning or the end.

:returns:

  **bin_edges** (``<array of dtype float>``)
    The edges to pass into histogram.
    '''

    # determine the bin-edges
    if mode == 'equal':
        bin_edges = np.linspace(np.min(data), np.max(data), bins+1)
    elif mode == 'log':
        # NOTE: requires strictly positive data
        bin_edges = np.logspace(np.log10(np.min(data)), np.log10(np.max(data)), bins+1)
    elif mode == 'uniform':
        # - check
        if hasattr(bins, "__len__"):
            raise IOError('Only the number of bins can be specified')
        # - use the minimum count to estimate the number of bins
        if min_count is not None and min_count is not False:
            if type(min_count) != int: raise IOError('"min_count" must be an integer number')
            bins = int(np.floor(float(len(data))/float(min_count)))
        # - number of data-points in each bin (equal for each)
        count = int(np.floor(float(len(data))/float(bins))) * np.ones(bins, dtype='int')
        # - increase the number of data-points by one in as many bins as needed,
        #   such that the total fits the total number of data-points
        #   (bug-fix: "np.int" was removed from NumPy; the builtin "int" is equivalent)
        count[np.linspace(0, bins-1, len(data)-np.sum(count)).astype(int)] += 1
        # - split the data
        idx     = np.empty((bins+1), dtype='int')
        idx[0 ] = 0
        idx[1:] = np.cumsum(count)
        idx[-1] = len(data) - 1
        # - determine the bin-edges: take the data-values at the split points
        bin_edges = np.unique(np.sort(data)[idx])
    else:
        raise IOError('Unknown option')

    # remove empty starting and ending bin (related to an unfortunate choice of bin-edges)
    if remove_empty_edges:
        N, _ = np.histogram(data, bins=bin_edges, density=False)
        idx  = np.min(np.where(N > 0)[0])
        jdx  = np.max(np.where(N > 0)[0])
        bin_edges = bin_edges[(idx):(jdx+2)]

    # merge bins with too few data-points (if needed)
    bin_edges = histogram_bin_edges_mincount(data, min_count=min_count, bins=bin_edges)

    # merge bins that have too small of a width
    bin_edges = histogram_bin_edges_minwidth(min_width=min_width, bins=bin_edges)

    # select only bins that encompass an integer (and retain the original bounds)
    if integer:
        idx = np.where(np.diff(np.floor(bin_edges)) >= 1)[0]
        idx = np.unique(np.hstack((0, idx, len(bin_edges)-1)))
        bin_edges = bin_edges[idx]

    # return
    return bin_edges
# ==================================================================================================
def histogram(data, return_edges=True, **kwargs):
    r'''
Compute a histogram.

See `numpy.histogram <https://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html>`_

:extra options:

  **return_edges** ([``True``] | ``False``)
    Return the bin edges if set to ``True``, return their midpoints otherwise.
    '''

    # delegate the actual computation to NumPy
    count, bin_edges = np.histogram(data, **kwargs)

    if return_edges:
        return count, bin_edges

    # replace the edges by the midpoint of each bin
    midpoints = bin_edges[:-1] + np.diff(bin_edges) / 2.

    return count, midpoints
# ==================================================================================================
def histogram_cumulative(data, **kwargs):
    r'''
Compute a cumulative histogram.

See `numpy.histogram <https://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html>`_

:extra options:

  **return_edges** ([``True``] | ``False``)
    Return the bin edges if set to ``True``, return their midpoints otherwise.

  **normalize** ([``False``] | ``True``)
    Normalize such that the final probability is one. In this case the function returns the (binned)
    cumulative probability density.
    '''

    # read options not understood by "np.histogram"
    return_edges = kwargs.pop('return_edges', True)
    normalize    = kwargs.pop('normalize'   , False)

    count, edges = np.histogram(data, **kwargs)

    # accumulate the per-bin counts
    count = np.cumsum(count)

    # normalize such that the last value is one
    if normalize:
        count = count / count[-1]

    # replace the edges by the midpoint of each bin
    if not return_edges:
        edges = edges[:-1] + np.diff(edges) / 2.

    return count, edges
# ==================================================================================================
def hist(P, edges, **kwargs):
    r'''
Plot a histogram as a collection of rectangular patches.

:arguments:

  **P** (``<array_like>``)
    Count (or density) per bin.

  **edges** (``<array_like>``)
    The bin-edges (one entry more than ``P``).

:options:

  **axis** ([``plt.gca()``] | ...)
    Specify the axis to plot on.

  **cindex** (``<numpy.ndarray>``)
    Array with, for each patch, the value that should be indexed to a color.

  **autoscale** ([``True``] | ``False``)
    Automatically update the limits of the plot.

  ...
    Any ``matplotlib.collections.PatchCollection`` option.

:returns:

  Handle of the patch objects.
    '''

    from matplotlib.collections import PatchCollection
    from matplotlib.patches import Polygon

    # extract local options (not understood by "PatchCollection")
    axis      = kwargs.pop( 'axis'      , plt.gca() )
    cindex    = kwargs.pop( 'cindex'    , None      )
    autoscale = kwargs.pop( 'autoscale' , True      )

    # set defaults
    kwargs.setdefault('edgecolor','k')

    # no color-index -> set transparent
    if cindex is None:
        kwargs.setdefault('facecolor',(0.,0.,0.,0.))

    # convert -> list of Polygons: one rectangle per bin, from the baseline (y=0) to the count
    poly = []
    for p, xl, xu in zip(P, edges[:-1], edges[1:]):
        coor = np.array([
            [xl, 0.],
            [xu, 0.],
            [xu, p ],
            [xl, p ],
        ])
        poly.append(Polygon(coor))
    # NOTE: "(poly)" is simply the list "poly"; the parentheses do not make a tuple
    args = (poly)

    # convert patches -> matplotlib-objects
    p = PatchCollection(args,**kwargs)

    # add colors to patches
    if cindex is not None:
        p.set_array(cindex)

    # add patches to axis
    axis.add_collection(p)

    # rescale the axes manually (automatic limits of Collections are not supported by matplotlib)
    if autoscale:
        # - get limits
        xlim = [ edges[0], edges[-1] ]
        ylim = [ 0       , np.max(P) ]
        # - set limits +/- 10% extra margin
        axis.set_xlim([xlim[0]-.1*(xlim[1]-xlim[0]),xlim[1]+.1*(xlim[1]-xlim[0])])
        axis.set_ylim([ylim[0]-.1*(ylim[1]-ylim[0]),ylim[1]+.1*(ylim[1]-ylim[0])])

    return p
# ==================================================================================================
def cdf(data, mode='continuous', **kwargs):
    '''
Return the cumulative density.

:arguments:

  **data** (``<numpy.ndarray>``)
    Input data, to plot the distribution for.

:returns:

  **P** (``<numpy.ndarray>``)
    Cumulative probability.

  **x** (``<numpy.ndarray>``)
    Data points.
    '''

    # sorted data vs. an equally spaced probability in [0, 1]
    # NOTE: "mode" and extra keyword arguments are currently unused
    x = np.sort(data)
    P = np.linspace(0.0, 1.0, len(data))

    return (P, x)
# ==================================================================================================
def patch(*args, **kwargs):
    '''
Add patches to plot. The color of the patches is indexed according to a specified color-index.

:example:

  Plot a finite element mesh: the outline of the undeformed configuration, and the deformed
  configuration for which the elements get a color e.g. based on stress::

    import matplotlib.pyplot as plt
    import goosempl as gplt

    fig,ax = plt.subplots()

    p = gplt.patch(coor=coor+disp,conn=conn,axis=ax,cindex=stress,cmap='YlOrRd',edgecolor=None)
    _ = gplt.patch(coor=coor     ,conn=conn,axis=ax)

    cbar = fig.colorbar(p,axis=ax,aspect=10)

    plt.show()

:arguments - option 1/2:

  **patches** (``<list>``)
    List with patch objects. Can be replaced by specifying ``coor`` and ``conn``.

:arguments - option 2/2:

  **coor** (``<numpy.ndarray>`` | ``<list>`` (nested))
    Matrix with on each row the coordinates (positions) of each node.

  **conn** (``<numpy.ndarray>`` | ``<list>`` (nested))
    Matrix with on each row the node numbers (rows in ``coor``) which form an element (patch).

:options:

  **cindex** (``<numpy.ndarray>``)
    Array with, for each patch, the value that should be indexed to a color.

  **axis** (``<matplotlib>``)
    Specify an axis to include to plot in. By default the current axis is used.

  **autoscale** ([``True``] | ``False``)
    Automatically update the limits of the plot (currently automatic limits of Collections are not
    supported by matplotlib). Only applied when ``coor`` is specified.

:recommended options:

  **cmap** (``<str>`` | ...)
    Specify a colormap.

  **linewidth** (``<float>``)
    Width of the edges.

  **edgecolor** (``<str>`` | ...)
    Color of the edges.

  **clim** (``(<float>,<float>)``)
    Lower and upper limit of the color-axis.

:returns:

  **handle** (``<matplotlib>``)
    Handle of the patch objects.

.. seealso::

  * `matplotlib example
    <http://matplotlib.org/examples/api/patch_collection.html>`_.
    '''

    from matplotlib.collections import PatchCollection
    from matplotlib.patches import Polygon

    # extract local options (not understood by "PatchCollection")
    axis      = kwargs.pop('axis'     , plt.gca())
    cindex    = kwargs.pop('cindex'   , None     )
    coor      = kwargs.pop('coor'     , None     )
    conn      = kwargs.pop('conn'     , None     )
    autoscale = kwargs.pop('autoscale', True     )

    # set defaults
    kwargs.setdefault('edgecolor', 'k')

    # no color-index -> set transparent
    if cindex is None:
        kwargs.setdefault('facecolor', (0., 0., 0., 0.))

    # select the input mode: a mesh ("coor" and "conn", option 2) or a list of patches (option 1)
    if coor is not None and conn is not None:
        # allow nested lists, as documented
        coor    = np.asarray(coor)
        patches = [Polygon(coor[iconn, :]) for iconn in conn]
    elif len(args) == 1:
        # bug-fix: the old code did "tuple(poly, *args)", which raises TypeError for any
        # positional argument; positional patch-lists (option 1) are now honoured
        patches = args[0]
    else:
        raise IOError('Specify both "coor" and "conn"')

    # convert patches -> matplotlib-objects
    p = PatchCollection(patches, **kwargs)

    # add colors to patches
    if cindex is not None:
        p.set_array(cindex)

    # add patches to axis
    axis.add_collection(p)

    # rescale the axes manually (only possible when the nodal coordinates are known)
    if autoscale and coor is not None:
        # - get limits
        xlim = [np.min(coor[:, 0]), np.max(coor[:, 0])]
        ylim = [np.min(coor[:, 1]), np.max(coor[:, 1])]
        # - set limits +/- 10% extra margin
        axis.set_xlim([xlim[0] - .1 * (xlim[1] - xlim[0]), xlim[1] + .1 * (xlim[1] - xlim[0])])
        axis.set_ylim([ylim[0] - .1 * (ylim[1] - ylim[0]), ylim[1] + .1 * (ylim[1] - ylim[0])])

    return p
# ==================================================================================================
|
tdegeus/GooseMPL | GooseMPL/__init__.py | rel2abs_x | python | def rel2abs_x(x, axis=None):
r'''
Transform relative x-coordinates to absolute x-coordinates. Relative coordinates correspond to a
fraction of the relevant axis. Be sure to set the limits and scale before calling this function!
:arguments:
**x** (``float``, ``list``)
Relative coordinates.
:options:
**axis** ([``plt.gca()``] | ...)
Specify the axis to which to apply the limits.
:returns:
**x** (``float``, ``list``)
Absolute coordinates.
'''
# get current axis
if axis is None:
axis = plt.gca()
# get current limits
xmin, xmax = axis.get_xlim()
# transform
# - log scale
if axis.get_xscale() == 'log':
try : return [10.**(np.log10(xmin)+i*(np.log10(xmax)-np.log10(xmin))) if i is not None else i for i in x]
except: return 10.**(np.log10(xmin)+x*(np.log10(xmax)-np.log10(xmin)))
# - normal scale
else:
try : return [xmin+i*(xmax-xmin) if i is not None else i for i in x]
except: return xmin+x*(xmax-xmin) | r'''
Transform relative x-coordinates to absolute x-coordinates. Relative coordinates correspond to a
fraction of the relevant axis. Be sure to set the limits and scale before calling this function!
:arguments:
**x** (``float``, ``list``)
Relative coordinates.
:options:
**axis** ([``plt.gca()``] | ...)
Specify the axis to which to apply the limits.
:returns:
**x** (``float``, ``list``)
Absolute coordinates. | train | https://github.com/tdegeus/GooseMPL/blob/16e1e06cbcf7131ac98c03ca7251ce83734ef905/GooseMPL/__init__.py#L287-L323 | null | '''
This module provides some extensions to matplotlib.
:dependencies:
* numpy
* matplotlib
:copyright:
| Tom de Geus
| tom@geus.me
| http://www.geus.me
'''
# ==================================================================================================
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
import os,re,sys
# ==================================================================================================
def find_latex_font_serif():
    r'''
Find an available font to mimic LaTeX, and return its name.
    '''

    import os, re
    import matplotlib.font_manager

    def fontname(path):
        # font name = file name without extension, up to the first ' - ' separator
        return os.path.splitext(os.path.split(path)[-1])[0].split(' - ')[0]

    # all fonts known to matplotlib
    available = matplotlib.font_manager.findSystemFonts(fontpaths=None, fontext='ttf')

    # patterns for suitable fonts, most preferred first
    preference = [
        r'.*Computer\ Modern\ Roman.*',
        r'.*CMU\ Serif.*',
        r'.*CMU.*',
        r'.*Times.*',
        r'.*DejaVu.*',
        r'.*Serif.*',
    ]

    # return the name of the first available font matching the most preferred pattern
    for pattern in preference:
        for path in available:
            if re.match(pattern, path):
                return fontname(path)

    # no suitable font installed
    return None
# --------------------------------------------------------------------------------------------------
def copy_style():
    r'''
Write all goose-styles to the relevant matplotlib configuration directory.
    '''

    import os
    import matplotlib

    # style definitions
    # -----------------

    styles = {}

    styles['goose.mplstyle'] = '''
figure.figsize : 8,6
font.weight : normal
font.size : 16
axes.labelsize : medium
axes.titlesize : medium
xtick.labelsize : small
ytick.labelsize : small
xtick.top : True
ytick.right : True
axes.facecolor : none
axes.prop_cycle : cycler('color',['k', 'r', 'g', 'b', 'y', 'c', 'm'])
legend.fontsize : medium
legend.fancybox : true
legend.columnspacing : 1.0
legend.handletextpad : 0.2
lines.linewidth : 2
image.cmap : afmhot
image.interpolation : nearest
image.origin : lower
savefig.facecolor : none
figure.autolayout : True
errorbar.capsize : 2
'''

    styles['goose-tick-in.mplstyle'] = '''
xtick.direction : in
ytick.direction : in
'''

    styles['goose-tick-lower.mplstyle'] = '''
xtick.top : False
ytick.right : False
axes.spines.top : False
axes.spines.right : False
'''

    # call once instead of twice (the lookup scans the system's font directories)
    serif_font = find_latex_font_serif()

    if serif_font is not None:
        styles['goose-latex.mplstyle'] = r'''
font.family : serif
font.serif : {serif:s}
font.weight : bold
font.size : 18
text.usetex : true
text.latex.preamble : \usepackage{{amsmath}},\usepackage{{amsfonts}},\usepackage{{amssymb}},\usepackage{{bm}}
'''.format(serif=serif_font)
    else:
        # bug-fix: this branch is a plain string (no ".format"), so braces must be single;
        # the old doubled braces wrote a literal "{{amsmath}}" into the style file
        styles['goose-latex.mplstyle'] = r'''
font.family : serif
font.weight : bold
font.size : 18
text.usetex : true
text.latex.preamble : \usepackage{amsmath},\usepackage{amsfonts},\usepackage{amssymb},\usepackage{bm}
'''

    # write style definitions
    # -----------------------

    # directory name where the styles are stored
    dirname = os.path.abspath(os.path.join(matplotlib.get_configdir(), 'stylelib'))

    # make directory if it does not yet exist
    if not os.path.isdir(dirname): os.makedirs(dirname)

    # write all styles
    for fname, style in styles.items():
        open(os.path.join(dirname, fname),'w').write(style)
# ==================================================================================================
def set_decade_lims(axis=None, direction=None):
    r'''
Set limits to the floor/ceil values in terms of decades.

:options:

  **axis** ([``plt.gca()``] | ...)
    Specify the axis to which to apply the limits.

  **direction** ([``None``] | ``'x'`` | ``'y'``)
    Limit the application to a certain direction (default: both).
    '''

    # default axis
    if axis is None:
        axis = plt.gca()

    # x-axis: floor/ceil the current limits to full decades
    if direction is None or direction == 'x':
        lo, hi = axis.get_xlim()
        axis.set_xlim([10 ** np.floor(np.log10(lo)), 10 ** np.ceil(np.log10(hi))])

    # y-axis: floor/ceil the current limits to full decades
    if direction is None or direction == 'y':
        lo, hi = axis.get_ylim()
        axis.set_ylim([10 ** np.floor(np.log10(lo)), 10 ** np.ceil(np.log10(hi))])
# ==================================================================================================
def scale_lim(lim, factor=1.05):
    r'''
Scale limits to be 5% wider, to have a nice plot.

:arguments:

  **lim** (``<list>`` | ``<str>``)
    The limits. May be a string "[...,...]", which is converted to a list.

:options:

  **factor** ([``1.05``] | ``<float>``)
    Scale factor.
    '''

    # convert string "[...,...]"
    # NOTE(review): "eval" executes arbitrary code; do not call with untrusted input
    if type(lim) == str:
        lim = eval(lim)

    # widen both ends by half of the extra width (the list is modified in place)
    margin = (factor - 1.) / 2. * (lim[1] - lim[0])
    lim[0] -= margin
    lim[1] += margin

    return lim
# ==================================================================================================
def abs2rel_x(x, axis=None):
    r'''
Transform absolute x-coordinates to relative x-coordinates. Relative coordinates correspond to a
fraction of the relevant axis. Be sure to set the limits and scale before calling this function!

:arguments:

  **x** (``float``, ``list``)
    Absolute coordinates.

:options:

  **axis** ([``plt.gca()``] | ...)
    Specify the axis to which to apply the limits.

:returns:

  **x** (``float``, ``list``)
    Relative coordinates.
    '''

    # get current axis
    if axis is None:
        axis = plt.gca()

    # get current limits
    xmin, xmax = axis.get_xlim()

    # transform
    # NOTE: the bare "except" catches the TypeError raised by iterating a scalar, so a
    # list input returns a list (with None passed through) and a scalar input a scalar
    # - log scale: interpolate in the logarithm of the coordinates
    if axis.get_xscale() == 'log':
        try   : return [(np.log10(i)-np.log10(xmin))/(np.log10(xmax)-np.log10(xmin)) if i is not None else i for i in x]
        except: return (np.log10(x)-np.log10(xmin))/(np.log10(xmax)-np.log10(xmin))
    # - normal scale
    else:
        try   : return [(i-xmin)/(xmax-xmin) if i is not None else i for i in x]
        except: return (x-xmin)/(xmax-xmin)
# ==================================================================================================
def abs2rel_y(y, axis=None):
    r'''
Transform absolute y-coordinates to relative y-coordinates. Relative coordinates correspond to a
fraction of the relevant axis. Be sure to set the limits and scale before calling this function!

:arguments:

  **y** (``float``, ``list``)
    Absolute coordinates.

:options:

  **axis** ([``plt.gca()``] | ...)
    Specify the axis to which to apply the limits.

:returns:

  **y** (``float``, ``list``)
    Relative coordinates.
    '''

    # get current axis
    if axis is None:
        axis = plt.gca()

    # get current limits
    ymin, ymax = axis.get_ylim()

    # transform
    # NOTE: the bare "except" catches the TypeError raised by iterating a scalar, so a
    # list input returns a list (with None passed through) and a scalar input a scalar
    # - log scale: interpolate in the logarithm of the coordinates
    #   (bug-fix: the scale of the y-axis is relevant here; the old code checked "get_xscale")
    if axis.get_yscale() == 'log':
        try   : return [(np.log10(i)-np.log10(ymin))/(np.log10(ymax)-np.log10(ymin)) if i is not None else i for i in y]
        except: return (np.log10(y)-np.log10(ymin))/(np.log10(ymax)-np.log10(ymin))
    # - normal scale
    else:
        try   : return [(i-ymin)/(ymax-ymin) if i is not None else i for i in y]
        except: return (y-ymin)/(ymax-ymin)
# ==================================================================================================
# ==================================================================================================
def rel2abs_y(y, axis=None):
    r'''
Transform relative y-coordinates to absolute y-coordinates. Relative coordinates correspond to a
fraction of the relevant axis. Be sure to set the limits and scale before calling this function!

:arguments:

  **y** (``float``, ``list``)
    Relative coordinates.

:options:

  **axis** ([``plt.gca()``] | ...)
    Specify the axis to which to apply the limits.

:returns:

  **y** (``float``, ``list``)
    Absolute coordinates.
    '''

    # get current axis
    if axis is None:
        axis = plt.gca()

    # get current limits
    ymin, ymax = axis.get_ylim()

    # transform
    # NOTE: the bare "except" catches the TypeError raised by iterating a scalar, so a
    # list input returns a list (with None passed through) and a scalar input a scalar
    # - log scale: interpolate in the logarithm of the coordinates
    #   (bug-fix: the scale of the y-axis is relevant here; the old code checked "get_xscale")
    if axis.get_yscale() == 'log':
        try   : return [10.**(np.log10(ymin)+i*(np.log10(ymax)-np.log10(ymin))) if i is not None else i for i in y]
        except: return 10.**(np.log10(ymin)+y*(np.log10(ymax)-np.log10(ymin)))
    # - normal scale
    else:
        try   : return [ymin+i*(ymax-ymin) if i is not None else i for i in y]
        except: return ymin+y*(ymax-ymin)
# ==================================================================================================
def subplots(scale_x=None, scale_y=None, scale=None, **kwargs):
    r'''
Run ``matplotlib.pyplot.subplots`` with ``figsize`` set to the correct multiple of the default.

:additional options:

  **scale, scale_x, scale_y** (``<float>``)
    Scale the figure-size (along one of the dimensions).
    '''

    # an explicit "figsize" takes precedence: forward unchanged
    if 'figsize' in kwargs:
        return plt.subplots(**kwargs)

    # start from the default figure-size
    w, h = mpl.rcParams['figure.figsize']

    # apply the scale factors
    if scale is not None:
        w, h = w * scale, h * scale
    if scale_x is not None:
        w = w * scale_x
    if scale_y is not None:
        h = h * scale_y

    # grow the figure with the number of sub-plots
    nrows = kwargs.pop('nrows', 1)
    ncols = kwargs.pop('ncols', 1)

    return plt.subplots(nrows=nrows, ncols=ncols, figsize=(ncols * w, nrows * h), **kwargs)
# ==================================================================================================
def plot(x, y, units='absolute', axis=None, **kwargs):
    r'''
Plot.

:arguments:

  **x, y** (``list``)
    Coordinates.

:options:

  **units** ([``'absolute'``] | ``'relative'``)
    The type of units in which the coordinates are specified. Relative coordinates correspond to a
    fraction of the relevant axis. If you use relative coordinates, be sure to set the limits and
    scale before calling this function!

  ...
    Any ``plt.plot(...)`` option.

:returns:

  The handle of the ``plt.plot(...)`` command.
    '''

    # default axis
    if axis is None:
        axis = plt.gca()

    # relative -> absolute coordinates
    if units.lower() == 'relative':
        x, y = rel2abs_x(x, axis), rel2abs_y(y, axis)

    return axis.plot(x, y, **kwargs)
# ==================================================================================================
def text(x, y, text, units='absolute', axis=None, **kwargs):
    r'''
Plot a text.

:arguments:

  **x, y** (``float``)
    Coordinates.

  **text** (``str``)
    Text to plot.

:options:

  **units** ([``'absolute'``] | ``'relative'``)
    The type of units in which the coordinates are specified. Relative coordinates correspond to a
    fraction of the relevant axis. If you use relative coordinates, be sure to set the limits and
    scale before calling this function!

  ...
    Any ``plt.text(...)`` option.

:returns:

  The handle of the ``plt.text(...)`` command.
    '''

    # default axis
    if axis is None:
        axis = plt.gca()

    # relative -> absolute coordinates
    if units.lower() == 'relative':
        x, y = rel2abs_x(x, axis), rel2abs_y(y, axis)

    return axis.text(x, y, text, **kwargs)
# ==================================================================================================
def diagonal_powerlaw(exp, ll=None, lr=None, tl=None, tr=None, width=None, height=None, plot=False, **kwargs):
    r'''
Set the limits such that a power-law with a certain exponent lies on the diagonal.

:arguments:

  **exp** (``<float>``)
    The power-law exponent.

  **ll, lr, tl, tr** (``<list>``)
    Coordinates of the lower-left, or the lower-right, or the top-left, or the top-right corner.

  **width, height** (``<float>``)
    Width or the height.

:options:

  **axis** ([``plt.gca()``] | ...)
    Specify the axis to which to apply the limits.

  **plot** ([``False``] | ``True``)
    Plot the diagonal.

  ...
    Any ``plt.plot(...)`` option.

:returns:

  The handle of the ``plt.plot(...)`` command (if any).
    '''

    axis = kwargs.pop('axis', plt.gca())

    # exactly one of "width"/"height" must be given; work in log-space from here on
    if width and not height: width = np.log(width )
    elif height and not width : height = np.log(height)
    else: raise IOError('Specify "width" or "height"')

    # exactly one corner must be given; convert it to log-space coordinates
    if ll and not lr and not tl and not tr: ll = [ np.log(ll[0]), np.log(ll[1]) ]
    elif lr and not ll and not tl and not tr: lr = [ np.log(lr[0]), np.log(lr[1]) ]
    elif tl and not lr and not ll and not tr: tl = [ np.log(tl[0]), np.log(tl[1]) ]
    elif tr and not lr and not tl and not ll: tr = [ np.log(tr[0]), np.log(tr[1]) ]
    else: raise IOError('Specify "ll" or "lr" or "tl" or "tr"')

    axis.set_xscale('log')
    axis.set_yscale('log')

    # complete the missing dimension such that the slope matches the exponent
    if width : height = width * np.abs(exp)
    elif height: width = height / np.abs(exp)

    # set the limits, growing away from the specified corner
    if ll:
        axis.set_xlim(sorted([np.exp(ll[0]), np.exp(ll[0]+width )]))
        axis.set_ylim(sorted([np.exp(ll[1]), np.exp(ll[1]+height)]))
    elif lr:
        axis.set_xlim(sorted([np.exp(lr[0]), np.exp(lr[0]-width )]))
        axis.set_ylim(sorted([np.exp(lr[1]), np.exp(lr[1]+height)]))
    elif tl:
        axis.set_xlim(sorted([np.exp(tl[0]), np.exp(tl[0]+width )]))
        axis.set_ylim(sorted([np.exp(tl[1]), np.exp(tl[1]-height)]))
    elif tr:
        axis.set_xlim(sorted([np.exp(tr[0]), np.exp(tr[0]-width )]))
        axis.set_ylim(sorted([np.exp(tr[1]), np.exp(tr[1]-height)]))

    # optionally draw the diagonal: full-axis power-law in relative coordinates
    if plot:
        if exp > 0: return plot_powerlaw(exp, 0., 0., 1., **kwargs)
        else : return plot_powerlaw(exp, 0., 1., 1., **kwargs)
# ==================================================================================================
def annotate_powerlaw(text, exp, startx, starty, width=None, rx=0.5, ry=0.5, **kwargs):
    r'''
Added a label to the middle of a power-law annotation (see ``goosempl.plot_powerlaw``).

:arguments:

  **exp** (``float``)
    The power-law exponent.

  **startx, starty** (``float``)
    Start coordinates.

:options:

  **width, height, endx, endy** (``float``)
    Definition of the end coordinate (only one of these options is needed).

  **rx, ry** (``float``)
    Shift in x- and y-direction w.r.t. the default coordinates.

  **units** ([``'relative'``] | ``'absolute'``)
    The type of units in which the coordinates are specified. Relative coordinates correspond to a
    fraction of the relevant axis. If you use relative coordinates, be sure to set the limits and
    scale before calling this function!

  **axis** ([``plt.gca()``] | ...)
    Specify the axis to which to apply the limits.

  ...
    Any ``plt.text(...)`` option.

:returns:

  The handle of the ``plt.text(...)`` command.
    '''

    # get options/defaults
    endx = kwargs.pop('endx' , None )
    endy = kwargs.pop('endy' , None )
    height = kwargs.pop('height', None )
    units = kwargs.pop('units' , 'relative')
    axis = kwargs.pop('axis' , plt.gca() )

    # check: a power-law is a straight line only on a double-logarithmic scale
    if axis.get_xscale() != 'log' or axis.get_yscale() != 'log':
        raise IOError('This function only works on a log-log scale, where the power-law is a straight line')

    # apply width/height: derive one end coordinate, the other follows from the power-law
    if width is not None:
        endx = startx + width
        endy = None
    elif height is not None:
        if exp > 0: endy = starty + height
        elif exp == 0: endy = starty
        else : endy = starty - height
        endx = None

    # transform relative -> absolute coordinates (None entries are passed through)
    if units.lower() == 'relative':
        [startx, endx] = rel2abs_x([startx, endx], axis)
        [starty, endy] = rel2abs_y([starty, endy], axis)

    # determine multiplication constant, such that the line passes through the start point
    const = starty / ( startx**exp )

    # get end x/y-coordinate from the power-law itself
    if endx is not None: endy = const * endx**exp
    else : endx = ( endy / const )**( 1/exp )

    # middle of the line (in log-space), shifted by the relative offsets "rx" and "ry"
    x = 10. ** ( np.log10(startx) + rx * ( np.log10(endx) - np.log10(startx) ) )
    y = 10. ** ( np.log10(starty) + ry * ( np.log10(endy) - np.log10(starty) ) )

    # plot
    return axis.text(x, y, text, **kwargs)
# ==================================================================================================
def plot_powerlaw(exp, startx, starty, width=None, **kwargs):
    r'''
Plot a power-law.

:arguments:

  **exp** (``float``)
    The power-law exponent.

  **startx, starty** (``float``)
    Start coordinates.

:options:

  **width, height, endx, endy** (``float``)
    Definition of the end coordinate (only one of these options is needed).

  **units** ([``'relative'``] | ``'absolute'``)
    The type of units in which the coordinates are specified. Relative coordinates correspond to a
    fraction of the relevant axis. If you use relative coordinates, be sure to set the limits and
    scale before calling this function!

  **axis** ([``plt.gca()``] | ...)
    Specify the axis to which to apply the limits.

  ...
    Any ``plt.plot(...)`` option.

:returns:

  The handle of the ``plt.plot(...)`` command.
    '''

    # get options/defaults
    endx = kwargs.pop('endx' , None )
    endy = kwargs.pop('endy' , None )
    height = kwargs.pop('height', None )
    units = kwargs.pop('units' , 'relative')
    axis = kwargs.pop('axis' , plt.gca() )

    # check: a power-law is a straight line only on a double-logarithmic scale
    if axis.get_xscale() != 'log' or axis.get_yscale() != 'log':
        raise IOError('This function only works on a log-log scale, where the power-law is a straight line')

    # apply width/height: derive one end coordinate, the other follows from the power-law
    if width is not None:
        endx = startx + width
        endy = None
    elif height is not None:
        if exp > 0: endy = starty + height
        elif exp == 0: endy = starty
        else : endy = starty - height
        endx = None

    # transform relative -> absolute coordinates (None entries are passed through)
    if units.lower() == 'relative':
        [startx, endx] = rel2abs_x([startx, endx], axis)
        [starty, endy] = rel2abs_y([starty, endy], axis)

    # determine multiplication constant, such that the line passes through the start point
    const = starty / ( startx**exp )

    # get end x/y-coordinate from the power-law itself
    if endx is not None: endy = const * endx**exp
    else : endx = ( endy / const )**( 1/exp )

    # plot
    return axis.plot([startx, endx], [starty, endy], **kwargs)
# ==================================================================================================
def grid_powerlaw(exp, insert=0, skip=0, end=-1, step=0, axis=None, **kwargs):
    r'''
    Draw a power-law grid: a grid that respects a certain power-law exponent. The grid-lines start
    from the positions of the ticks.

    :arguments:

      **exp** (``float``)
        The power-law exponent.

    :options:

      **insert** (``<int>``)
        Insert extra lines in between the default lines set by the tick positions.

      **skip, end, step** (``<int>``)
        Select from the lines based on ``coor = coor[skip:end:step]``.

      **axis** ([``plt.gca()``] | ...)
        Specify the axis to draw on.

      ...
        Any ``plt.plot(...)`` option.

    :returns:

      The handles of the ``plt.plot(...)`` command.
    '''

    # default axis
    if axis is None: axis = plt.gca()

    # default plot settings
    kwargs.setdefault('color'    , 'k' )
    kwargs.setdefault('linestyle', '--')
    kwargs.setdefault('linewidth', 1   )

    # check: only on a log-log scale is a power-law a straight line
    if axis.get_xscale() != 'log' or axis.get_yscale() != 'log':
        raise IOError('This function only works on a log-log scale, where the power-law is a straight line')

    # zero-exponent: draw horizontal lines
    if exp == 0:

        # y-coordinate of the start positions (relative, i.e. axis-fraction, coordinates)
        starty = abs2rel_y(axis.get_yticks(), axis=axis)

        # insert extra coordinates in between the tick positions
        if insert > 0:
            n      = len(starty)
            x      = np.linspace(0,1,n+(n-1)*int(insert))
            xp     = np.linspace(0,1,n)
            starty = np.interp(x, xp, starty)

        # skip coordinates
        # NOTE(review): with the default end=-1 the last line is always dropped -- confirm intended
        starty = starty[int(skip):int(end):int(1+step)]

        # set remaining coordinates: horizontal lines spanning the full x-range
        endy   = starty
        startx = np.zeros((len(starty)))
        endx   = np.ones ((len(starty)))

    # all other exponents
    else:

        # get the axis' size in real coordinates
        # - get the limits
        xmin, xmax = axis.get_xlim()
        ymin, ymax = axis.get_ylim()
        # - compute the size in both directions (in decades)
        deltax = np.log10(xmax) - np.log10(xmin)
        deltay = np.log10(ymax) - np.log10(ymin)

        # convert the exponent in real coordinates to an exponent in relative coordinates
        b = np.abs(exp) * deltax / deltay

        # x-coordinate of the start positions
        startx = abs2rel_x(axis.get_xticks(), axis=axis)

        # compute how many lines need to be prepended (lines entering from the left edge)
        Dx   = startx[1] - startx[0]
        nneg = int(np.floor(1./(b*Dx))) - 1

        # add extra to be sure
        if insert > 0:
            nneg += 1

        # prepend
        if nneg > 0:
            startx = np.hstack(( startx[0]+np.cumsum(-Dx * np.ones((nneg)))[::-1], startx ))

        # insert extra coordinates
        if insert > 0:
            n      = len(startx)
            x      = np.linspace(0,1,n+(n-1)*int(insert))
            xp     = np.linspace(0,1,n)
            startx = np.interp(x, xp, startx)

        # skip coordinates
        if step > 0:
            startx = startx[int(skip)::int(1+step)]

        # x-coordinate of the end of the lines
        # NOTE(review): abs2rel_x returns a plain list; this addition relies on "startx" having
        # become an ndarray above (np.hstack / np.interp) -- confirm for nneg == 0 and insert == 0
        endx = startx + 1/b

        # y-coordinate of the start and the end of the lines
        if exp > 0:
            starty = np.zeros((len(startx)))
            endy   = np.ones ((len(startx)))
        else:
            starty = np.ones ((len(startx)))
            endy   = np.zeros((len(startx)))

    # convert to real coordinates
    startx = rel2abs_x(startx, axis)
    endx   = rel2abs_x(endx  , axis)
    starty = rel2abs_y(starty, axis)
    endy   = rel2abs_y(endy  , axis)

    # plot all lines with a single call (one column per line)
    lines = axis.plot(np.vstack(( startx, endx )), np.vstack(( starty, endy )), **kwargs)

    # suppress the labels of all but the first line, so that a legend shows one entry
    plt.setp(lines[1:], label="_")

    # return handles
    return lines
# ==================================================================================================
def histogram_bin_edges_minwidth(min_width, bins):
    r'''
    Merge too-narrow bins with their right neighbour until every bin spans at least ``min_width``.

    :arguments:

      **min_width** (``<float>``)
        The minimum bin width. ``None`` or ``False`` disables merging.

      **bins** (``<array_like>``)
        The bin-edges.

    :returns:

      The (possibly reduced) bin-edges.
    '''
    # merging disabled -> nothing to do
    if min_width is None or min_width is False:
        return bins

    # repeatedly drop one edge until no bin is narrower than the threshold
    while True:
        narrow = np.where(np.diff(bins) < min_width)[0]
        if narrow.size == 0:
            return bins
        first = narrow[0]
        # drop the inner edge of the first offending bin; when it is the last
        # bin, drop its left edge so the outer (right-most) edge is preserved
        if first + 1 == len(bins) - 1:
            bins = np.delete(bins, first)
        else:
            bins = np.delete(bins, first + 1)
# ==================================================================================================
def histogram_bin_edges_mincount(data, min_count, bins):
    r'''
    Merge under-populated bins with their right neighbour until every bin holds at least
    ``min_count`` data-points.

    :arguments:

      **data** (``<array_like>``)
        Input data. The histogram is computed over the flattened array.

      **min_count** (``<int>``)
        The minimum number of data-points per bin. ``None`` or ``False`` disables merging.

      **bins** (``<array_like>``)
        The bin-edges.

    :returns:

      The (possibly reduced) bin-edges.
    '''
    # merging disabled -> nothing to do
    if min_count is None or min_count is False:
        return bins

    # check (exact type check: rejects floats, bools and numpy integers alike)
    if type(min_count) != int:
        raise IOError('"min_count" must be an integer number')

    # repeatedly drop one edge until no bin is under-populated
    while True:
        counts, _ = np.histogram(data, bins=bins, density=False)
        sparse = np.where(counts < min_count)[0]
        if sparse.size == 0:
            return bins
        first = sparse[0]
        # drop the inner edge of the first offending bin; when it is the last
        # bin, drop its left edge so the outer (right-most) edge is preserved
        if first + 1 == len(counts):
            bins = np.delete(bins, first)
        else:
            bins = np.delete(bins, first + 1)
# ==================================================================================================
def histogram_bin_edges(data, bins=10, mode='equal', min_count=None, integer=False, remove_empty_edges=True, min_width=None):
    r'''
    Determine bin-edges.

    :arguments:

      **data** (``<array_like>``)
        Input data. The histogram is computed over the flattened array.

    :options:

      **bins** ([``10``] | ``<int>``)
        The number of bins.

      **mode** ([``'equal'``] | ``<str>``)
        Mode with which to compute the bin-edges:
        * ``'equal'``: each bin has equal width.
        * ``'log'``: logarithmic spacing.
        * ``'uniform'``: uniform number of data-points per bin.

      **min_count** (``<int>``)
        The minimum number of data-points per bin.

      **min_width** (``<float>``)
        The minimum width of each bin.

      **integer** ([``False``] | ``True``)
        If ``True``, bins not encompassing an integer are removed
        (e.g. a bin with edges ``[1.1, 1.9]`` is removed, but ``[0.9, 1.1]`` is not removed).

      **remove_empty_edges** ([``True``] | ``False``)
        Remove empty bins at the beginning or the end.

    :returns:

      **bin_edges** (``<array of dtype float>``)
        The edges to pass into histogram.
    '''

    # determine the bin-edges
    if mode == 'equal':
        bin_edges = np.linspace(np.min(data), np.max(data), bins+1)

    elif mode == 'log':
        bin_edges = np.logspace(np.log10(np.min(data)), np.log10(np.max(data)), bins+1)

    elif mode == 'uniform':

        # - check
        if hasattr(bins, "__len__"):
            raise IOError('Only the number of bins can be specified')

        # - use the minimum count to estimate the number of bins
        if min_count is not None and min_count is not False:
            if type(min_count) != int:
                raise IOError('"min_count" must be an integer number')
            bins = int(np.floor(float(len(data))/float(min_count)))

        # - number of data-points in each bin (equal for each)
        count = int(np.floor(float(len(data))/float(bins))) * np.ones(bins, dtype='int')

        # - increase the number of data-points by one in as many bins as needed,
        #   such that the total fits the total number of data-points
        #   (bugfix: use the builtin ``int``; ``np.int`` was removed in NumPy >= 1.24)
        count[np.linspace(0, bins-1, len(data)-np.sum(count)).astype(int)] += 1

        # - split the data
        idx = np.empty((bins+1), dtype='int')
        idx[0 ] = 0
        idx[1:] = np.cumsum(count)
        idx[-1] = len(data) - 1

        # - determine the bin-edges
        bin_edges = np.unique(np.sort(data)[idx])

    else:
        raise IOError('Unknown option')

    # remove empty starting and ending bin (related to an unfortunate choice of bin-edges)
    # NOTE(review): assumes at least one non-empty bin; an all-empty histogram raises here
    if remove_empty_edges:
        N, _ = np.histogram(data, bins=bin_edges, density=False)
        idx = np.min(np.where(N > 0)[0])
        jdx = np.max(np.where(N > 0)[0])
        bin_edges = bin_edges[(idx):(jdx+2)]

    # merge bins with too few data-points (if needed)
    bin_edges = histogram_bin_edges_mincount(data, min_count=min_count, bins=bin_edges)

    # merge bins that have too small of a width
    bin_edges = histogram_bin_edges_minwidth(min_width=min_width, bins=bin_edges)

    # select only bins that encompass an integer (and retain the original bounds)
    if integer:
        idx = np.where(np.diff(np.floor(bin_edges)) >= 1)[0]
        idx = np.unique(np.hstack((0, idx, len(bin_edges)-1)))
        bin_edges = bin_edges[idx]

    # return
    return bin_edges
# ==================================================================================================
def histogram(data, return_edges=True, **kwargs):
    r'''
    Compute a histogram: a thin wrapper around
    `numpy.histogram <https://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html>`_.

    :extra options:

      **return_edges** ([``True``] | ``False``)
        Return the bin edges if set to ``True``, return their midpoints otherwise.
    '''
    counts, edges = np.histogram(data, **kwargs)

    if return_edges:
        return counts, edges

    # convert bin edges -> midpoints of each bin
    midpoints = edges[:-1] + np.diff(edges) / 2.
    return counts, midpoints
# ==================================================================================================
def histogram_cumulative(data, **kwargs):
    r'''
    Compute a cumulative histogram: a thin wrapper around
    `numpy.histogram <https://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html>`_.

    :extra options:

      **return_edges** ([``True``] | ``False``)
        Return the bin edges if set to ``True``, return their midpoints otherwise.

      **normalize** ([``False``] | ``True``)
        Normalize such that the final probability is one. In this case the function returns the
        (binned) cumulative probability density.
    '''
    return_edges = kwargs.pop('return_edges', True)
    normalize    = kwargs.pop('normalize'  , False)

    counts, edges = np.histogram(data, **kwargs)

    # running total of the counts
    cumulative = np.cumsum(counts)

    # scale such that the last value equals one
    if normalize:
        cumulative = cumulative / cumulative[-1]

    # convert bin edges -> midpoints of each bin
    if not return_edges:
        edges = edges[:-1] + np.diff(edges) / 2.

    return cumulative, edges
# ==================================================================================================
def hist(P, edges, **kwargs):
    r'''
    Plot a histogram as a collection of rectangular patches (one rectangle per bin).

    :arguments:

      **P** (``<array_like>``)
        Height (count/probability) of each bin.

      **edges** (``<array_like>``)
        The bin-edges (one entry more than ``P``).

    :options:

      **axis** ([``plt.gca()``] | ...)
        The axis to draw on.

      **cindex** (``<numpy.ndarray>``)
        Value per bin, mapped to a color.

      **autoscale** ([``True``] | ``False``)
        Rescale the axis limits to the data (with a 10% margin).

      ...
        Any ``matplotlib.collections.PatchCollection`` option.

    :returns:

      The handle of the ``PatchCollection``.
    '''
    from matplotlib.collections import PatchCollection
    from matplotlib.patches import Polygon
    # extract local options
    axis      = kwargs.pop( 'axis'      , plt.gca() )
    cindex    = kwargs.pop( 'cindex'    , None      )
    autoscale = kwargs.pop( 'autoscale' , True      )
    # set defaults
    kwargs.setdefault('edgecolor','k')
    # no color-index -> set transparent
    if cindex is None:
        kwargs.setdefault('facecolor',(0.,0.,0.,0.))
    # convert -> list of Polygons: one rectangle from the baseline (y=0) up to p per bin
    poly = []
    for p, xl, xu in zip(P, edges[:-1], edges[1:]):
        coor = np.array([
            [xl, 0.],
            [xu, 0.],
            [xu, p ],
            [xl, p ],
        ])
        poly.append(Polygon(coor))
    # NOTE(review): ``(poly)`` is not a tuple -- ``args`` is simply the list of patches
    args = (poly)
    # convert patches -> matplotlib-objects
    p = PatchCollection(args,**kwargs)
    # add colors to patches
    if cindex is not None:
        p.set_array(cindex)
    # add patches to axis
    axis.add_collection(p)
    # rescale the axes manually (add_collection does not update the limits)
    if autoscale:
        # - get limits
        xlim = [ edges[0], edges[-1] ]
        ylim = [ 0       , np.max(P) ]
        # - set limits +/- 10% extra margin
        axis.set_xlim([xlim[0]-.1*(xlim[1]-xlim[0]),xlim[1]+.1*(xlim[1]-xlim[0])])
        axis.set_ylim([ylim[0]-.1*(ylim[1]-ylim[0]),ylim[1]+.1*(ylim[1]-ylim[0])])
    return p
# ==================================================================================================
def cdf(data, mode='continuous', **kwargs):
    '''
    Return the empirical cumulative density of ``data``.

    :arguments:

      **data** (``<numpy.ndarray>``)
        Input data, to plot the distribution for.

    :returns:

      **P** (``<numpy.ndarray>``)
        Cumulative probability (evenly spaced on [0, 1]).

      **x** (``<numpy.ndarray>``)
        Data points, sorted ascending.

    NOTE: ``mode`` and any extra keyword arguments are accepted but currently ignored.
    '''
    x = np.sort(data)
    P = np.linspace(0.0, 1.0, len(x))
    return (P, x)
# ==================================================================================================
def patch(*args,**kwargs):
    '''
    Add patches to a plot. The color of the patches is indexed according to a specified color-index.

    :example:

      Plot a finite element mesh: the outline of the undeformed configuration, and the deformed
      configuration for which the elements get a color e.g. based on stress::

        import matplotlib.pyplot as plt
        import goosempl as gplt

        fig,ax = plt.subplots()

        p = gplt.patch(coor=coor+disp,conn=conn,axis=ax,cindex=stress,cmap='YlOrRd',edgecolor=None)
        _ = gplt.patch(coor=coor     ,conn=conn,axis=ax)

        cbar = fig.colorbar(p,axis=ax,aspect=10)

        plt.show()

    :arguments:

      **coor** (``<numpy.ndarray>`` | ``<list>`` (nested))
        Matrix with on each row the coordinates (positions) of each node.

      **conn** (``<numpy.ndarray>`` | ``<list>`` (nested))
        Matrix with on each row the node numbers (rows in ``coor``) which form an element (patch).

    :options:

      **cindex** (``<numpy.ndarray>``)
        Array with, for each patch, the value that should be indexed to a color.

      **axis** (``<matplotlib>``)
        Specify an axis to include to plot in. By default the current axis is used.

      **autoscale** ([``True``] | ``False``)
        Automatically update the limits of the plot (currently automatic limits of Collections are
        not supported by matplotlib).

    :recommended options:

      **cmap** (``<str>`` | ...)
        Specify a colormap.

      **linewidth** (``<float>``)
        Width of the edges.

      **edgecolor** (``<str>`` | ...)
        Color of the edges.

      **clim** (``(<float>,<float>)``)
        Lower and upper limit of the color-axis.

    :returns:

      **handle** (``<matplotlib>``)
        Handle of the patch objects.

    .. seealso::

      * `matplotlib example
        <http://matplotlib.org/examples/api/patch_collection.html>`_.
    '''
    from matplotlib.collections import PatchCollection
    from matplotlib.patches import Polygon

    # check dependent options
    # NOTE(review): "coor" and "conn" are effectively mandatory -- a bare list of
    # patches passed positionally cannot get past this check
    if ( 'coor' not in kwargs or 'conn' not in kwargs ):
        raise IOError('Specify both "coor" and "conn"')

    # extract local options
    axis      = kwargs.pop( 'axis'      , plt.gca() )
    cindex    = kwargs.pop( 'cindex'    , None      )
    coor      = kwargs.pop( 'coor'      , None      )
    conn      = kwargs.pop( 'conn'      , None      )
    autoscale = kwargs.pop( 'autoscale' , True      )

    # set defaults
    kwargs.setdefault('edgecolor','k')

    # no color-index -> set transparent
    if cindex is None:
        kwargs.setdefault('facecolor',(0.,0.,0.,0.))

    # convert mesh -> list of Polygons
    if coor is not None and conn is not None:
        poly = []
        for iconn in conn:
            poly.append(Polygon(coor[iconn,:]))
        # bugfix: ``tuple(poly, *args)`` raised "TypeError: tuple() takes at most 1
        # argument" whenever extra positional arguments were given; the patches
        # built from the mesh are what PatchCollection needs
        args = tuple(poly)

    # convert patches -> matplotlib-objects
    p = PatchCollection(args,**kwargs)

    # add colors to patches
    if cindex is not None:
        p.set_array(cindex)

    # add patches to axis
    axis.add_collection(p)

    # rescale the axes manually (add_collection does not update the limits)
    if autoscale:
        # - get limits
        xlim = [ np.min(coor[:,0]) , np.max(coor[:,0]) ]
        ylim = [ np.min(coor[:,1]) , np.max(coor[:,1]) ]
        # - set limits +/- 10% extra margin
        axis.set_xlim([xlim[0]-.1*(xlim[1]-xlim[0]),xlim[1]+.1*(xlim[1]-xlim[0])])
        axis.set_ylim([ylim[0]-.1*(ylim[1]-ylim[0]),ylim[1]+.1*(ylim[1]-ylim[0])])

    return p
# ==================================================================================================
def subplots(scale_x=None, scale_y=None, scale=None, **kwargs):
    r'''
    Run ``matplotlib.pyplot.subplots`` with ``figsize`` set to the correct multiple of the default.

    :additional options:

      **scale, scale_x, scale_y** (``<float>``)
        Scale the figure-size (along one of the dimensions).
    '''
    # bugfix: this definition was corrupted by pasted metadata; reconstructed below.
    # an explicit figsize wins: defer to matplotlib directly
    if 'figsize' in kwargs:
        return plt.subplots(**kwargs)

    # start from the default figure size
    width, height = mpl.rcParams['figure.figsize']

    # apply the scale factors
    if scale is not None:
        width  *= scale
        height *= scale
    if scale_x is not None:
        width *= scale_x
    if scale_y is not None:
        height *= scale_y

    # one default-sized panel per row/column
    nrows = kwargs.pop('nrows', 1)
    ncols = kwargs.pop('ncols', 1)
    width  = ncols * width
    height = nrows * height

    return plt.subplots(nrows=nrows, ncols=ncols, figsize=(width, height), **kwargs)
This module provides some extensions to matplotlib.
:dependencies:
* numpy
* matplotlib
:copyright:
| Tom de Geus
| tom@geus.me
| http://www.geus.me
'''
# ==================================================================================================
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
import os,re,sys
# ==================================================================================================
def find_latex_font_serif():
    r'''
    Find an available font to mimic LaTeX, and return its name.
    Returns ``None`` when none of the preferred fonts is installed.
    '''
    import os, re
    import matplotlib.font_manager

    def basename(path):
        # strip directory, extension, and any ' - style' suffix
        return os.path.splitext(os.path.split(path)[-1])[0].split(' - ')[0]

    fonts = matplotlib.font_manager.findSystemFonts(fontpaths=None, fontext='ttf')

    # patterns ordered from most to least preferred
    preferences = [
        r'.*Computer\ Modern\ Roman.*',
        r'.*CMU\ Serif.*',
        r'.*CMU.*',
        r'.*Times.*',
        r'.*DejaVu.*',
        r'.*Serif.*',
    ]

    # return the first font (full path matched from the start) of the best pattern
    for pattern in preferences:
        for font in fonts:
            if re.match(pattern, font):
                return basename(font)

    return None
# --------------------------------------------------------------------------------------------------
def copy_style():
    r'''
    Write all goose-styles to the relevant matplotlib configuration directory
    (``<configdir>/stylelib``), creating that directory when needed. After calling this
    function the styles can be used as e.g. ``plt.style.use('goose')``.
    '''
    import os
    import matplotlib

    # style definitions
    # -----------------

    styles = {}

    styles['goose.mplstyle'] = '''
figure.figsize : 8,6
font.weight : normal
font.size : 16
axes.labelsize : medium
axes.titlesize : medium
xtick.labelsize : small
ytick.labelsize : small
xtick.top : True
ytick.right : True
axes.facecolor : none
axes.prop_cycle : cycler('color',['k', 'r', 'g', 'b', 'y', 'c', 'm'])
legend.fontsize : medium
legend.fancybox : true
legend.columnspacing : 1.0
legend.handletextpad : 0.2
lines.linewidth : 2
image.cmap : afmhot
image.interpolation : nearest
image.origin : lower
savefig.facecolor : none
figure.autolayout : True
errorbar.capsize : 2
'''

    styles['goose-tick-in.mplstyle'] = '''
xtick.direction : in
ytick.direction : in
'''

    styles['goose-tick-lower.mplstyle'] = '''
xtick.top : False
ytick.right : False
axes.spines.top : False
axes.spines.right : False
'''

    # the LaTeX style points to a real serif font when one is available
    if find_latex_font_serif() is not None:
        styles['goose-latex.mplstyle'] = r'''
font.family : serif
font.serif : {serif:s}
font.weight : bold
font.size : 18
text.usetex : true
text.latex.preamble : \usepackage{{amsmath}},\usepackage{{amsfonts}},\usepackage{{amssymb}},\usepackage{{bm}}
'''.format(serif=find_latex_font_serif())
    else:
        styles['goose-latex.mplstyle'] = r'''
font.family : serif
font.weight : bold
font.size : 18
text.usetex : true
text.latex.preamble : \usepackage{{amsmath}},\usepackage{{amsfonts}},\usepackage{{amssymb}},\usepackage{{bm}}
'''

    # write style definitions
    # -----------------------

    # directory name where the styles are stored
    dirname = os.path.abspath(os.path.join(matplotlib.get_configdir(), 'stylelib'))

    # make directory if it does not yet exist
    if not os.path.isdir(dirname): os.makedirs(dirname)

    # write all styles
    # (bugfix: use a context manager so the file handles are closed deterministically)
    for fname, style in styles.items():
        with open(os.path.join(dirname, fname), 'w') as handle:
            handle.write(style)
# ==================================================================================================
def set_decade_lims(axis=None, direction=None):
    r'''
    Snap the axis limits outward to whole decades (powers of ten).

    :options:

      **axis** ([``plt.gca()``] | ...)
        Specify the axis to which to apply the limits.

      **direction** ([``None``] | ``'x'`` | ``'y'``)
        Limit the application to a certain direction (default: both).
    '''
    if axis is None:
        axis = plt.gca()

    def snapped(lo, hi):
        # floor/ceil the decimal exponents, then convert back to linear values
        return 10. ** np.floor(np.log10(lo)), 10. ** np.ceil(np.log10(hi))

    if direction in (None, 'x'):
        axis.set_xlim(snapped(*axis.get_xlim()))

    if direction in (None, 'y'):
        axis.set_ylim(snapped(*axis.get_ylim()))
# ==================================================================================================
def scale_lim(lim, factor=1.05):
    r'''
    Widen limits by a factor (default 5%), to leave some margin around the data.

    :arguments:

      **lim** (``<list>`` | ``<str>``)
        The limits. May be a string "[...,...]", which is converted to a list.

    :options:

      **factor** ([``1.05``] | ``<float>``)
        Scale factor.
    '''
    # convert string "[...,...]"
    # NOTE(review): strings are evaluated with ``eval`` -- never pass untrusted input
    if type(lim) == str: lim = eval(lim)

    # widen symmetrically: half of the extra span on each side (in place)
    margin = (factor - 1.) / 2. * (lim[1] - lim[0])
    lim[0] -= margin
    lim[1] += margin

    return lim
# ==================================================================================================
def abs2rel_x(x, axis=None):
    r'''
    Transform absolute x-coordinates to relative x-coordinates: the fraction of the x-axis that
    each coordinate corresponds to. Be sure to set the limits and scale before calling this
    function!

    :arguments:

      **x** (``float``, ``list``)
        Absolute coordinates.

    :options:

      **axis** ([``plt.gca()``] | ...)
        Specify the axis whose limits and scale are used.

    :returns:

      **x** (``float``, ``list``)
        Relative coordinates.
    '''
    if axis is None:
        axis = plt.gca()

    lo, hi = axis.get_xlim()

    # pick the transform matching the axis scale
    if axis.get_xscale() == 'log':
        f = lambda v: (np.log10(v) - np.log10(lo)) / (np.log10(hi) - np.log10(lo))
    else:
        f = lambda v: (v - lo) / (hi - lo)

    # list-like input: transform item by item (None entries pass through untouched);
    # a scalar makes the iteration fail, in which case it is transformed directly
    try:
        return [f(i) if i is not None else i for i in x]
    except:
        return f(x)
# ==================================================================================================
def abs2rel_y(y, axis=None):
    r'''
    Transform absolute y-coordinates to relative y-coordinates. Relative coordinates correspond to
    a fraction of the relevant axis. Be sure to set the limits and scale before calling this
    function!

    :arguments:

      **y** (``float``, ``list``)
        Absolute coordinates.

    :options:

      **axis** ([``plt.gca()``] | ...)
        Specify the axis whose limits and scale are used.

    :returns:

      **y** (``float``, ``list``)
        Relative coordinates.
    '''

    # get current axis
    if axis is None:
        axis = plt.gca()

    # get current limits
    ymin, ymax = axis.get_ylim()

    # transform
    # - log scale
    #   bugfix: consult the *y*-scale (this used to read ``get_xscale``, which gave a
    #   wrong transform whenever the x- and y-scales differed)
    if axis.get_yscale() == 'log':
        try   : return [(np.log10(i)-np.log10(ymin))/(np.log10(ymax)-np.log10(ymin)) if i is not None else i for i in y]
        except: return (np.log10(y)-np.log10(ymin))/(np.log10(ymax)-np.log10(ymin))
    # - normal scale
    else:
        try   : return [(i-ymin)/(ymax-ymin) if i is not None else i for i in y]
        except: return (y-ymin)/(ymax-ymin)
# ==================================================================================================
def rel2abs_x(x, axis=None):
    r'''
    Transform relative x-coordinates (fractions of the x-axis) to absolute x-coordinates. Be sure
    to set the limits and scale before calling this function!

    :arguments:

      **x** (``float``, ``list``)
        Relative coordinates.

    :options:

      **axis** ([``plt.gca()``] | ...)
        Specify the axis whose limits and scale are used.

    :returns:

      **x** (``float``, ``list``)
        Absolute coordinates.
    '''
    if axis is None:
        axis = plt.gca()

    lo, hi = axis.get_xlim()

    # pick the transform matching the axis scale
    if axis.get_xscale() == 'log':
        f = lambda v: 10.**(np.log10(lo) + v*(np.log10(hi) - np.log10(lo)))
    else:
        f = lambda v: lo + v*(hi - lo)

    # list-like input: transform item by item (None entries pass through untouched);
    # a scalar makes the iteration fail, in which case it is transformed directly
    try:
        return [f(i) if i is not None else i for i in x]
    except:
        return f(x)
# ==================================================================================================
def rel2abs_y(y, axis=None):
    r'''
    Transform relative y-coordinates (fractions of the y-axis) to absolute y-coordinates. Be sure
    to set the limits and scale before calling this function!

    :arguments:

      **y** (``float``, ``list``)
        Relative coordinates.

    :options:

      **axis** ([``plt.gca()``] | ...)
        Specify the axis whose limits and scale are used.

    :returns:

      **y** (``float``, ``list``)
        Absolute coordinates.
    '''

    # get current axis
    if axis is None:
        axis = plt.gca()

    # get current limits
    ymin, ymax = axis.get_ylim()

    # transform
    # - log scale
    #   bugfix: consult the *y*-scale (this used to read ``get_xscale``, which gave a
    #   wrong transform whenever the x- and y-scales differed)
    if axis.get_yscale() == 'log':
        try   : return [10.**(np.log10(ymin)+i*(np.log10(ymax)-np.log10(ymin))) if i is not None else i for i in y]
        except: return 10.**(np.log10(ymin)+y*(np.log10(ymax)-np.log10(ymin)))
    # - normal scale
    else:
        try   : return [ymin+i*(ymax-ymin) if i is not None else i for i in y]
        except: return ymin+y*(ymax-ymin)
# ==================================================================================================
# ==================================================================================================
def plot(x, y, units='absolute', axis=None, **kwargs):
    r'''
    Thin wrapper around ``axis.plot`` that optionally interprets the coordinates as fractions of
    the axis.

    :arguments:

      **x, y** (``list``)
        Coordinates.

    :options:

      **units** ([``'absolute'``] | ``'relative'``)
        The type of units in which the coordinates are specified. Relative coordinates correspond
        to a fraction of the relevant axis. If you use relative coordinates, be sure to set the
        limits and scale before calling this function!

      **axis** ([``plt.gca()``] | ...)
        Specify the axis to plot on.

      ...
        Any ``plt.plot(...)`` option.

    :returns:

      The handle of the ``plt.plot(...)`` command.
    '''
    axis = plt.gca() if axis is None else axis

    # relative -> absolute (data) coordinates
    if units.lower() == 'relative':
        x = rel2abs_x(x, axis)
        y = rel2abs_y(y, axis)

    return axis.plot(x, y, **kwargs)
# ==================================================================================================
def text(x, y, text, units='absolute', axis=None, **kwargs):
    r'''
    Thin wrapper around ``axis.text`` that optionally interprets the coordinates as fractions of
    the axis.

    :arguments:

      **x, y** (``float``)
        Coordinates.

      **text** (``str``)
        Text to plot.

    :options:

      **units** ([``'absolute'``] | ``'relative'``)
        The type of units in which the coordinates are specified. Relative coordinates correspond
        to a fraction of the relevant axis. If you use relative coordinates, be sure to set the
        limits and scale before calling this function!

      **axis** ([``plt.gca()``] | ...)
        Specify the axis to plot on.

      ...
        Any ``plt.text(...)`` option.

    :returns:

      The handle of the ``plt.text(...)`` command.
    '''
    axis = plt.gca() if axis is None else axis

    # relative -> absolute (data) coordinates
    if units.lower() == 'relative':
        x = rel2abs_x(x, axis)
        y = rel2abs_y(y, axis)

    return axis.text(x, y, text, **kwargs)
# ==================================================================================================
def diagonal_powerlaw(exp, ll=None, lr=None, tl=None, tr=None, width=None, height=None, plot=False, **kwargs):
    r'''
    Set the limits such that a power-law with a certain exponent lies on the diagonal.

    :arguments:

      **exp** (``<float>``)
        The power-law exponent.

      **ll, lr, tl, tr** (``<list>``)
        Coordinates of the lower-left, or the lower-right, or the top-left, or the top-right
        corner (specify exactly one).

      **width, height** (``<float>``)
        Width or the height (specify exactly one).

    :options:

      **axis** ([``plt.gca()``] | ...)
        Specify the axis to which to apply the limits.

      **plot** ([``False``] | ``True``)
        Plot the diagonal.

      ...
        Any ``plt.plot(...)`` option.

    :returns:

      The handle of the ``plt.plot(...)`` command (if any).
    '''
    axis = kwargs.pop('axis', plt.gca())

    # exactly one of width/height must be given; convert to log-space
    # NOTE(review): truthiness check -- a value of 0 is treated as "not given"
    if   width  and not height: width  = np.log(width )
    elif height and not width : height = np.log(height)
    else: raise IOError('Specify "width" or "height"')

    # exactly one corner must be given; convert to log-space
    if   ll and not lr and not tl and not tr: ll = [ np.log(ll[0]), np.log(ll[1]) ]
    elif lr and not ll and not tl and not tr: lr = [ np.log(lr[0]), np.log(lr[1]) ]
    elif tl and not lr and not ll and not tr: tl = [ np.log(tl[0]), np.log(tl[1]) ]
    elif tr and not lr and not tl and not ll: tr = [ np.log(tr[0]), np.log(tr[1]) ]
    else: raise IOError('Specify "ll" or "lr" or "tl" or "tr"')

    axis.set_xscale('log')
    axis.set_yscale('log')

    # couple width and height through the (absolute) exponent, so the power-law is diagonal
    if   width : height = width  * np.abs(exp)
    elif height: width  = height / np.abs(exp)

    # set the limits, growing away from the specified corner
    if ll:
        axis.set_xlim(sorted([np.exp(ll[0]), np.exp(ll[0]+width )]))
        axis.set_ylim(sorted([np.exp(ll[1]), np.exp(ll[1]+height)]))
    elif lr:
        axis.set_xlim(sorted([np.exp(lr[0]), np.exp(lr[0]-width )]))
        axis.set_ylim(sorted([np.exp(lr[1]), np.exp(lr[1]+height)]))
    elif tl:
        axis.set_xlim(sorted([np.exp(tl[0]), np.exp(tl[0]+width )]))
        axis.set_ylim(sorted([np.exp(tl[1]), np.exp(tl[1]-height)]))
    elif tr:
        axis.set_xlim(sorted([np.exp(tr[0]), np.exp(tr[0]-width )]))
        axis.set_ylim(sorted([np.exp(tr[1]), np.exp(tr[1]-height)]))

    # optionally draw the diagonal itself (in relative coordinates it spans the whole axis)
    if plot:
        if exp > 0: return plot_powerlaw(exp, 0., 0., 1., **kwargs)
        else      : return plot_powerlaw(exp, 0., 1., 1., **kwargs)
# ==================================================================================================
def annotate_powerlaw(text, exp, startx, starty, width=None, rx=0.5, ry=0.5, **kwargs):
    r'''
    Add a label to the middle of a power-law annotation (see ``goosempl.plot_powerlaw``).

    :arguments:

      **text** (``str``)
        The label text.

      **exp** (``float``)
        The power-law exponent.

      **startx, starty** (``float``)
        Start coordinates.

    :options:

      **width, height, endx, endy** (``float``)
        Definition of the end coordinate (only one of these options is needed).

      **rx, ry** (``float``)
        Shift in x- and y-direction w.r.t. the default (mid-point) coordinates.

      **units** ([``'relative'``] | ``'absolute'``)
        The type of units in which the coordinates are specified. Relative coordinates correspond
        to a fraction of the relevant axis. If you use relative coordinates, be sure to set the
        limits and scale before calling this function!

      **axis** ([``plt.gca()``] | ...)
        Specify the axis to plot on.

      ...
        Any ``plt.text(...)`` option.

    :returns:

      The handle of the ``plt.text(...)`` command.
    '''

    # get options/defaults
    endx   = kwargs.pop('endx'  , None      )
    endy   = kwargs.pop('endy'  , None      )
    height = kwargs.pop('height', None      )
    units  = kwargs.pop('units' , 'relative')
    axis   = kwargs.pop('axis'  , plt.gca() )

    # check: only on a log-log scale is a power-law a straight line
    if axis.get_xscale() != 'log' or axis.get_yscale() != 'log':
        raise IOError('This function only works on a log-log scale, where the power-law is a straight line')

    # apply width/height: fix one end coordinate, the other follows from the power-law below
    if width is not None:
        endx = startx + width
        endy = None
    elif height is not None:
        # the sign of the exponent decides whether the line goes up or down
        if   exp >  0: endy = starty + height
        elif exp == 0: endy = starty
        else         : endy = starty - height
        endx = None

    # transform relative (axis-fraction) -> absolute (data) coordinates
    if units.lower() == 'relative':
        [startx, endx] = rel2abs_x([startx, endx], axis)
        [starty, endy] = rel2abs_y([starty, endy], axis)

    # determine multiplication constant: c = y0 / x0**exp
    const = starty / ( startx**exp )

    # get end x/y-coordinate from whichever end was fixed
    if endx is not None: endy = const * endx**exp
    else               : endx = ( endy / const )**( 1/exp )

    # middle: interpolate in log-space, at fractions (rx, ry) along the annotated line
    x = 10. ** ( np.log10(startx) + rx * ( np.log10(endx) - np.log10(startx) ) )
    y = 10. ** ( np.log10(starty) + ry * ( np.log10(endy) - np.log10(starty) ) )

    # plot
    return axis.text(x, y, text, **kwargs)
# ==================================================================================================
def plot_powerlaw(exp, startx, starty, width=None, **kwargs):
    r'''
    Plot a power-law ``y = c * x**exp`` (a straight line on a log-log axis).

    :arguments:

      **exp** (``float``)
        The power-law exponent.

      **startx, starty** (``float``)
        Start coordinates.

    :options:

      **width, height, endx, endy** (``float``)
        Definition of the end coordinate (only one of these options is needed).

      **units** ([``'relative'``] | ``'absolute'``)
        The type of units in which the coordinates are specified. Relative coordinates correspond
        to a fraction of the relevant axis. If you use relative coordinates, be sure to set the
        limits and scale before calling this function!

      **axis** ([``plt.gca()``] | ...)
        Specify the axis to plot on.

      ...
        Any ``plt.plot(...)`` option.

    :returns:

      The handle of the ``plt.plot(...)`` command.
    '''

    # get options/defaults
    endx   = kwargs.pop('endx'  , None      )
    endy   = kwargs.pop('endy'  , None      )
    height = kwargs.pop('height', None      )
    units  = kwargs.pop('units' , 'relative')
    axis   = kwargs.pop('axis'  , plt.gca() )

    # check: only on a log-log scale is a power-law a straight line
    if axis.get_xscale() != 'log' or axis.get_yscale() != 'log':
        raise IOError('This function only works on a log-log scale, where the power-law is a straight line')

    # apply width/height: fix one end coordinate, the other follows from the power-law below
    if width is not None:
        endx = startx + width
        endy = None
    elif height is not None:
        # the sign of the exponent decides whether the line goes up or down
        if   exp >  0: endy = starty + height
        elif exp == 0: endy = starty
        else         : endy = starty - height
        endx = None

    # transform relative (axis-fraction) -> absolute (data) coordinates
    if units.lower() == 'relative':
        [startx, endx] = rel2abs_x([startx, endx], axis)
        [starty, endy] = rel2abs_y([starty, endy], axis)

    # determine multiplication constant: c = y0 / x0**exp
    const = starty / ( startx**exp )

    # get end x/y-coordinate from whichever end was fixed
    if endx is not None: endy = const * endx**exp
    else               : endx = ( endy / const )**( 1/exp )

    # plot
    return axis.plot([startx, endx], [starty, endy], **kwargs)
# ==================================================================================================
def grid_powerlaw(exp, insert=0, skip=0, end=-1, step=0, axis=None, **kwargs):
    r'''
    Draw a power-law grid: a grid that respects a certain power-law exponent. The grid-lines
    start from the positions of the ticks.

    :arguments:

        **exp** (``float``)
            The power-law exponent.

    :options:

        **insert** (``<int>``)
            Insert extra lines in between the default lines set by the tick positions.

        **skip, end, step** (``<int>``)
            Select from the lines based on ``coor = coor[skip:end:step]``.

        **axis** ([``plt.gca()``] | ...)
            Specify the axis to which to apply the limits.

        ...
            Any ``plt.plot(...)`` option.

    :returns:

        The handle of the ``plt.plot(...)`` command.
    '''

    # default axis
    if axis is None: axis = plt.gca()

    # default plot settings (thin dashed black lines)
    kwargs.setdefault('color'    , 'k' )
    kwargs.setdefault('linestyle', '--')
    kwargs.setdefault('linewidth', 1   )

    # check: the construction below only makes sense on a log-log plot
    if axis.get_xscale() != 'log' or axis.get_yscale() != 'log':
        raise IOError('This function only works on a log-log scale, where the power-law is a straight line')

    # zero-exponent: draw horizontal lines
    if exp == 0:

        # y-coordinate of the start positions (work in relative [0,1] coordinates)
        starty = abs2rel_y(axis.get_yticks(), axis=axis)

        # insert extra coordinates by linear interpolation between the tick positions
        if insert > 0:
            n      = len(starty)
            x      = np.linspace(0,1,n+(n-1)*int(insert))
            xp     = np.linspace(0,1,n)
            starty = np.interp(x, xp, starty)

        # skip coordinates
        starty = starty[int(skip):int(end):int(1+step)]

        # set remaining coordinates: horizontal lines span the full x-range
        endy   = starty
        startx = np.zeros((len(starty)))
        endx   = np.ones ((len(starty)))

    # all other exponents
    else:

        # get the axis' size in real coordinates
        # - get the limits
        xmin, xmax = axis.get_xlim()
        ymin, ymax = axis.get_ylim()
        # - compute the size in both directions (in decades)
        deltax = np.log10(xmax) - np.log10(xmin)
        deltay = np.log10(ymax) - np.log10(ymin)

        # convert the exponent in real coordinates to an exponent in relative coordinates
        b = np.abs(exp) * deltax / deltay

        # x-coordinate of the start positions (in relative coordinates)
        startx = abs2rel_x(axis.get_xticks(), axis=axis)

        # compute how many start positions need to be prepended
        # (lines that enter the view from outside the left edge)
        Dx   = startx[1] - startx[0]
        nneg = int(np.floor(1./(b*Dx))) - 1
        # add extra to be sure
        if insert > 0:
            nneg += 1

        # prepend
        if nneg > 0:
            startx = np.hstack(( startx[0]+np.cumsum(-Dx * np.ones((nneg)))[::-1], startx ))

        # insert extra coordinates by linear interpolation
        if insert > 0:
            n      = len(startx)
            x      = np.linspace(0,1,n+(n-1)*int(insert))
            xp     = np.linspace(0,1,n)
            startx = np.interp(x, xp, startx)

        # skip coordinates
        if step > 0:
            startx = startx[int(skip)::int(1+step)]

        # x-coordinate of the end of the lines (slope b in relative coordinates)
        endx = startx + 1/b

        # y-coordinate of the start and the end of the lines
        if exp > 0:
            starty = np.zeros((len(startx)))
            endy   = np.ones ((len(startx)))
        else:
            starty = np.ones ((len(startx)))
            endy   = np.zeros((len(startx)))

    # convert to real coordinates
    startx = rel2abs_x(startx, axis)
    endx   = rel2abs_x(endx  , axis)
    starty = rel2abs_y(starty, axis)
    endy   = rel2abs_y(endy  , axis)

    # plot (one column per line)
    lines = axis.plot(np.vstack(( startx, endx )), np.vstack(( starty, endy )), **kwargs)

    # remove excess legend labels: only the first line carries the label
    plt.setp(lines[1:], label="_")

    # return handles
    return lines
# ==================================================================================================
def histogram_bin_edges_minwidth(min_width, bins):
    r'''
    Merge bins with their right-neighbour until every bin has a minimum width.

    :arguments:

        **min_width** (``<float>``)
            The minimum bin width (``None``/``False`` disables merging).

        **bins** (``<array_like>``)
            The bin-edges.

    :returns:

        The (possibly merged) bin-edges.
    '''

    # merging disabled: return the input unchanged
    if min_width is None or min_width is False:
        return bins

    # repeatedly merge the first too-narrow bin until none remain
    while True:

        narrow = np.where(np.diff(bins) < min_width)[0]

        if narrow.size == 0:
            return bins

        first = narrow[0]

        if first + 1 == len(bins) - 1:
            # the last bin is too narrow: absorb it into its left neighbour
            bins = np.hstack((bins[:first], bins[-1]))
        else:
            # drop the narrow bin's right edge: merge it with its right neighbour
            bins = np.hstack((bins[:first + 1], bins[first + 2:]))
# ==================================================================================================
def histogram_bin_edges_mincount(data, min_count, bins):
    r'''
    Merge bins with their right-neighbour until every bin holds a minimum number of data-points.

    :arguments:

        **data** (``<array_like>``)
            Input data. The histogram is computed over the flattened array.

        **min_count** (``<int>``)
            The minimum number of data-points per bin (``None``/``False`` disables merging).

        **bins** (``<array_like>`` | ``<int>``)
            The bin-edges (or the number of bins, automatically converted to equal-sized bins).

    :returns:

        The (possibly merged) bin-edges.
    '''

    # merging disabled: return the input unchanged
    if min_count is None or min_count is False:
        return bins

    # check (NB: deliberately a strict type check, so e.g. booleans are rejected)
    if type(min_count) != int:
        raise IOError('"min_count" must be an integer number')

    # repeatedly merge the first under-populated bin until none remain
    while True:

        counts, _ = np.histogram(data, bins=bins, density=False)

        sparse = np.where(counts < min_count)[0]

        if sparse.size == 0:
            return bins

        first = sparse[0]

        if first + 1 == len(counts):
            # the last bin is under-populated: absorb it into its left neighbour
            bins = np.hstack((bins[:first], bins[-1]))
        else:
            # merge the under-populated bin with its right neighbour
            bins = np.hstack((bins[:first + 1], bins[first + 2:]))
# ==================================================================================================
def histogram_bin_edges(data, bins=10, mode='equal', min_count=None, integer=False, remove_empty_edges=True, min_width=None):
    r'''
    Determine bin-edges.

    :arguments:

        **data** (``<array_like>``)
            Input data. The histogram is computed over the flattened array.

    :options:

        **bins** ([``10``] | ``<int>``)
            The number of bins.

        **mode** ([``'equal'``] | ``<str>``)
            Mode with which to compute the bin-edges:
            * ``'equal'``: each bin has equal width.
            * ``'log'``: logarithmic spacing.
            * ``'uniform'``: uniform number of data-points per bin.

        **min_count** (``<int>``)
            The minimum number of data-points per bin.

        **min_width** (``<float>``)
            The minimum width of each bin.

        **integer** ([``False``] | ``True``)
            If ``True``, bins not encompassing an integer are removed
            (e.g. a bin with edges ``[1.1, 1.9]`` is removed, but ``[0.9, 1.1]`` is not removed).

        **remove_empty_edges** ([``True``] | ``False``)
            Remove empty bins at the beginning or the end.

    :returns:

        **bin_edges** (``<array of dtype float>``)
            The edges to pass into histogram.
    '''

    # determine the bin-edges
    if mode == 'equal':
        # equal-width bins spanning the data range
        bin_edges = np.linspace(np.min(data), np.max(data), bins+1)

    elif mode == 'log':
        # logarithmically spaced bins (assumes strictly positive data)
        bin_edges = np.logspace(np.log10(np.min(data)), np.log10(np.max(data)), bins+1)

    elif mode == 'uniform':
        # - check
        if hasattr(bins, "__len__"):
            raise IOError('Only the number of bins can be specified')
        # - use the minimum count to estimate the number of bins
        if min_count is not None and min_count is not False:
            if type(min_count) != int: raise IOError('"min_count" must be an integer number')
            bins = int(np.floor(float(len(data))/float(min_count)))
        # - number of data-points in each bin (equal for each)
        count = int(np.floor(float(len(data))/float(bins))) * np.ones(bins, dtype='int')
        # - increase the number of data-points by one in as many bins as needed,
        #   such that the total fits the total number of data-points
        # BUG FIX: "np.int" was removed from NumPy (>= 1.24); use the builtin "int" instead
        count[np.linspace(0, bins-1, len(data)-np.sum(count)).astype(int)] += 1
        # - split the data
        idx = np.empty((bins+1), dtype='int')
        idx[0 ] = 0
        idx[1:] = np.cumsum(count)
        idx[-1] = len(data) - 1
        # - determine the bin-edges
        bin_edges = np.unique(np.sort(data)[idx])

    else:
        raise IOError('Unknown option')

    # remove empty starting and ending bin (related to an unfortunate choice of bin-edges)
    if remove_empty_edges:
        N, _ = np.histogram(data, bins=bin_edges, density=False)
        idx = np.min(np.where(N > 0)[0])
        jdx = np.max(np.where(N > 0)[0])
        bin_edges = bin_edges[(idx):(jdx+2)]

    # merge bins with too few data-points (if needed)
    bin_edges = histogram_bin_edges_mincount(data, min_count=min_count, bins=bin_edges)

    # merge bins that have too small of a width
    bin_edges = histogram_bin_edges_minwidth(min_width=min_width, bins=bin_edges)

    # select only bins that encompass an integer (and retain the original bounds)
    if integer:
        idx = np.where(np.diff(np.floor(bin_edges)) >= 1)[0]
        idx = np.unique(np.hstack((0, idx, len(bin_edges)-1)))
        bin_edges = bin_edges[idx]

    # return
    return bin_edges
# ==================================================================================================
def histogram(data, return_edges=True, **kwargs):
    r'''
    Compute a histogram.
    See `numpy.histogram <https://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html>`_

    :extra options:

        **return_edges** ([``True``] | [``False``])
            Return the bin edges if set to ``True``, return their midpoints otherwise.
    '''

    # delegate the actual binning to NumPy
    counts, edges = np.histogram(data, **kwargs)

    # default output: counts and bin-edges
    if return_edges:
        return counts, edges

    # otherwise: convert the bin-edges to the midpoints of each bin
    midpoints = np.diff(edges) / 2. + edges[:-1]

    return counts, midpoints
# ==================================================================================================
def histogram_cumulative(data, **kwargs):
    r'''
    Compute a cumulative histogram.
    See `numpy.histogram <https://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html>`_

    :extra options:

        **return_edges** ([``True``] | [``False``])
            Return the bin edges if set to ``True``, return their midpoints otherwise.

        **normalize** ([``False``] | ``True``)
            Normalize such that the final probability is one. In this case the function returns
            the (binned) cumulative probability density.
    '''

    # extract local options (the rest is forwarded to numpy.histogram)
    edges_requested = kwargs.pop('return_edges', True)
    normalize       = kwargs.pop('normalize'   , False)

    counts, edges = np.histogram(data, **kwargs)

    # accumulate the counts
    cumulative = np.cumsum(counts)

    # normalize such that the final value equals one
    if normalize:
        cumulative = cumulative / cumulative[-1]

    # optionally convert the bin-edges to midpoints
    if not edges_requested:
        edges = np.diff(edges) / 2. + edges[:-1]

    return cumulative, edges
# ==================================================================================================
def hist(P, edges, **kwargs):
    r'''
    Plot a histogram: one rectangular patch per bin.

    :arguments:

        **P** (``<array_like>``)
            Count (or density) per bin.

        **edges** (``<array_like>``)
            The bin-edges.

    :options:

        **cindex** (``<numpy.ndarray>``)
            Array with, for each patch, the value that should be indexed to a color.

        **axis** ([``plt.gca()``] | ...)
            Specify the axis to plot in.

        **autoscale** ([``True``] | ``False``)
            Automatically update the limits of the plot.

    :returns:

        Handle of the ``PatchCollection``.
    '''

    from matplotlib.collections import PatchCollection
    from matplotlib.patches import Polygon

    # extract local options (the rest is forwarded to PatchCollection)
    axis      = kwargs.pop('axis'     , plt.gca())
    cindex    = kwargs.pop('cindex'   , None     )
    autoscale = kwargs.pop('autoscale', True     )

    # set defaults
    kwargs.setdefault('edgecolor', 'k')

    # no color-index -> fully transparent face
    if cindex is None:
        kwargs.setdefault('facecolor', (0., 0., 0., 0.))

    # build one rectangular polygon per bin
    polygons = []
    for height, left, right in zip(P, edges[:-1], edges[1:]):
        corners = np.array([
            [left , 0.    ],
            [right, 0.    ],
            [right, height],
            [left , height],
        ])
        polygons.append(Polygon(corners))

    # convert patches -> matplotlib-objects
    collection = PatchCollection((polygons), **kwargs)

    # add colors to patches
    if cindex is not None:
        collection.set_array(cindex)

    # add patches to axis
    axis.add_collection(collection)

    # rescale the axes manually (Collections do not trigger matplotlib's autoscaling)
    if autoscale:
        # - get limits
        xlim = [edges[0], edges[-1]]
        ylim = [0, np.max(P)]
        # - set limits +/- 10% extra margin
        axis.set_xlim([xlim[0] - .1*(xlim[1]-xlim[0]), xlim[1] + .1*(xlim[1]-xlim[0])])
        axis.set_ylim([ylim[0] - .1*(ylim[1]-ylim[0]), ylim[1] + .1*(ylim[1]-ylim[0])])

    return collection
# ==================================================================================================
def cdf(data, mode='continuous', **kwargs):
    '''
    Return the empirical cumulative density of ``data``.

    :arguments:

        **data** (``<numpy.ndarray>``)
            Input data, to plot the distribution for.

    :returns:

        **P** (``<numpy.ndarray>``)
            Cumulative probability.

        **x** (``<numpy.ndarray>``)
            Data points.
    '''

    # NOTE(review): "mode" and the extra keyword arguments are currently unused;
    # they are kept for interface compatibility
    P = np.linspace(0.0, 1.0, len(data))
    x = np.sort(data)

    return (P, x)
# ==================================================================================================
def patch(*args, **kwargs):
    '''
    Add patches to plot. The color of the patches is indexed according to a specified color-index.

    :example:

        Plot a finite element mesh: the outline of the undeformed configuration, and the deformed
        configuration for which the elements get a color e.g. based on stress::

            import matplotlib.pyplot as plt
            import goosempl as gplt

            fig,ax = plt.subplots()

            p = gplt.patch(coor=coor+disp,conn=conn,axis=ax,cindex=stress,cmap='YlOrRd',edgecolor=None)
            _ = gplt.patch(coor=coor     ,conn=conn,axis=ax)

            cbar = fig.colorbar(p,axis=ax,aspect=10)

            plt.show()

    :arguments - option 1/2:

        **patches** (``<list>``)
            List with patch objects. Can be replaced by specifying ``coor`` and ``conn``.

    :arguments - option 2/2:

        **coor** (``<numpy.ndarray>`` | ``<list>`` (nested))
            Matrix with on each row the coordinates (positions) of each node.

        **conn** (``<numpy.ndarray>`` | ``<list>`` (nested))
            Matrix with on each row the node numbers (rows in ``coor``) which form an element
            (patch).

    :options:

        **cindex** (``<numpy.ndarray>``)
            Array with, for each patch, the value that should be indexed to a color.

        **axis** (``<matplotlib>``)
            Specify an axis to include to plot in. By default the current axis is used.

        **autoscale** ([``True``] | ``False``)
            Automatically update the limits of the plot (currently automatic limits of
            Collections are not supported by matplotlib).

    :recommended options:

        **cmap** (``<str>`` | ...)
            Specify a colormap.

        **linewidth** (``<float>``)
            Width of the edges.

        **edgecolor** (``<str>`` | ...)
            Color of the edges.

        **clim** (``(<float>,<float>)``)
            Lower and upper limit of the color-axis.

    :returns:

        **handle** (``<matplotlib>``)
            Handle of the patch objects.

    .. seealso::

        * `matplotlib example
          <http://matplotlib.org/examples/api/patch_collection.html>`_.
    '''

    from matplotlib.collections import PatchCollection
    from matplotlib.patches import Polygon

    # check dependent options
    # BUG FIX: "coor"/"conn" were previously required even when a list of patches was passed
    # positionally (option 1/2), which made that documented option unusable
    if len(args) == 0 and ('coor' not in kwargs or 'conn' not in kwargs):
        raise IOError('Specify both "coor" and "conn"')

    # extract local options (the rest is forwarded to PatchCollection)
    axis      = kwargs.pop('axis'     , plt.gca())
    cindex    = kwargs.pop('cindex'   , None     )
    coor      = kwargs.pop('coor'     , None     )
    conn      = kwargs.pop('conn'     , None     )
    autoscale = kwargs.pop('autoscale', True     )

    # set defaults
    kwargs.setdefault('edgecolor', 'k')

    # no color-index -> set transparent
    if cindex is None:
        kwargs.setdefault('facecolor', (0., 0., 0., 0.))

    # convert mesh -> list of Polygons
    # BUG FIX: this used to read "args = tuple(poly, *args)", which raises
    # "TypeError: tuple() takes at most 1 argument" whenever positional patches were combined
    # with "coor"/"conn"
    if coor is not None and conn is not None:
        patches = [Polygon(coor[iconn, :]) for iconn in conn]
    else:
        patches = args[0]

    # convert patches -> matplotlib-objects
    p = PatchCollection(patches, **kwargs)

    # add colors to patches
    if cindex is not None:
        p.set_array(cindex)

    # add patches to axis
    axis.add_collection(p)

    # rescale the axes manually (only possible when the nodal coordinates are known)
    if autoscale and coor is not None:
        # - get limits
        xlim = [np.min(coor[:, 0]), np.max(coor[:, 0])]
        ylim = [np.min(coor[:, 1]), np.max(coor[:, 1])]
        # - set limits +/- 10% extra margin
        axis.set_xlim([xlim[0] - .1*(xlim[1]-xlim[0]), xlim[1] + .1*(xlim[1]-xlim[0])])
        axis.set_ylim([ylim[0] - .1*(ylim[1]-ylim[0]), ylim[1] + .1*(ylim[1]-ylim[0])])

    return p
# ==================================================================================================
|
tdegeus/GooseMPL | GooseMPL/__init__.py | plot | python | def plot(x, y, units='absolute', axis=None, **kwargs):
r'''
Plot.
:arguments:
**x, y** (``list``)
Coordinates.
:options:
**units** ([``'absolute'``] | ``'relative'``)
The type of units in which the coordinates are specified. Relative coordinates correspond to a
fraction of the relevant axis. If you use relative coordinates, be sure to set the limits and
scale before calling this function!
...
Any ``plt.plot(...)`` option.
:returns:
The handle of the ``plt.plot(...)`` command.
'''
# get current axis
if axis is None:
axis = plt.gca()
# transform
if units.lower() == 'relative':
x = rel2abs_x(x, axis)
y = rel2abs_y(y, axis)
# plot
return axis.plot(x, y, **kwargs) | r'''
Plot.
:arguments:
**x, y** (``list``)
Coordinates.
:options:
**units** ([``'absolute'``] | ``'relative'``)
The type of units in which the coordinates are specified. Relative coordinates correspond to a
fraction of the relevant axis. If you use relative coordinates, be sure to set the limits and
scale before calling this function!
...
Any ``plt.plot(...)`` option.
:returns:
The handle of the ``plt.plot(...)`` command. | train | https://github.com/tdegeus/GooseMPL/blob/16e1e06cbcf7131ac98c03ca7251ce83734ef905/GooseMPL/__init__.py#L401-L435 | [
"def rel2abs_x(x, axis=None):\n r'''\nTransform relative x-coordinates to absolute x-coordinates. Relative coordinates correspond to a\nfraction of the relevant axis. Be sure to set the limits and scale before calling this function!\n\n:arguments:\n\n **x** (``float``, ``list``)\n Relative coordinates.\n\n:options:\n\n **axis** ([``plt.gca()``] | ...)\n Specify the axis to which to apply the limits.\n\n:returns:\n\n **x** (``float``, ``list``)\n Absolute coordinates.\n '''\n\n # get current axis\n if axis is None:\n axis = plt.gca()\n\n # get current limits\n xmin, xmax = axis.get_xlim()\n\n # transform\n # - log scale\n if axis.get_xscale() == 'log':\n try : return [10.**(np.log10(xmin)+i*(np.log10(xmax)-np.log10(xmin))) if i is not None else i for i in x]\n except: return 10.**(np.log10(xmin)+x*(np.log10(xmax)-np.log10(xmin)))\n # - normal scale\n else:\n try : return [xmin+i*(xmax-xmin) if i is not None else i for i in x]\n except: return xmin+x*(xmax-xmin)\n",
"def rel2abs_y(y, axis=None):\n r'''\nTransform relative y-coordinates to absolute y-coordinates. Relative coordinates correspond to a\nfraction of the relevant axis. Be sure to set the limits and scale before calling this function!\n\n:arguments:\n\n **y** (``float``, ``list``)\n Relative coordinates.\n\n:options:\n\n **axis** ([``plt.gca()``] | ...)\n Specify the axis to which to apply the limits.\n\n:returns:\n\n **y** (``float``, ``list``)\n Absolute coordinates.\n '''\n\n # get current axis\n if axis is None:\n axis = plt.gca()\n\n # get current limits\n ymin, ymax = axis.get_ylim()\n\n # transform\n # - log scale\n if axis.get_xscale() == 'log':\n try : return [10.**(np.log10(ymin)+i*(np.log10(ymax)-np.log10(ymin))) if i is not None else i for i in y]\n except: return 10.**(np.log10(ymin)+y*(np.log10(ymax)-np.log10(ymin)))\n # - normal scale\n else:\n try : return [ymin+i*(ymax-ymin) if i is not None else i for i in y]\n except: return ymin+y*(ymax-ymin)\n"
] | '''
This module provides some extensions to matplotlib.
:dependencies:
* numpy
* matplotlib
:copyright:
| Tom de Geus
| tom@geus.me
| http://www.geus.me
'''
# ==================================================================================================
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
import os,re,sys
# ==================================================================================================
def find_latex_font_serif():
    r'''
    Find an available font to mimic LaTeX, and return its name (``None`` if none is found).
    '''

    import os, re
    import matplotlib.font_manager

    # strip the directory, extension, and any " - " suffix from a font path
    def fontname(path):
        return os.path.splitext(os.path.split(path)[-1])[0].split(' - ')[0]

    # all TTF fonts known to matplotlib
    available = matplotlib.font_manager.findSystemFonts(fontpaths=None, fontext='ttf')

    # candidate patterns, ordered by preference: the first match wins
    preferences = [
        r'.*Computer\ Modern\ Roman.*',
        r'.*CMU\ Serif.*',
        r'.*CMU.*',
        r'.*Times.*',
        r'.*DejaVu.*',
        r'.*Serif.*',
    ]

    for pattern in preferences:
        for font in available:
            if re.match(pattern, font):
                return fontname(font)

    return None
# --------------------------------------------------------------------------------------------------
def copy_style():
    r'''
    Write all goose-styles to the relevant matplotlib configuration directory
    (``<configdir>/stylelib``), so that they can be used with ``plt.style.use(...)``.
    '''

    import os
    import matplotlib

    # style definitions
    # -----------------

    styles = {}

    # base style: figure size, fonts, ticks on all sides, black-first color cycle, ...
    styles['goose.mplstyle'] = '''
figure.figsize : 8,6
font.weight : normal
font.size : 16
axes.labelsize : medium
axes.titlesize : medium
xtick.labelsize : small
ytick.labelsize : small
xtick.top : True
ytick.right : True
axes.facecolor : none
axes.prop_cycle : cycler('color',['k', 'r', 'g', 'b', 'y', 'c', 'm'])
legend.fontsize : medium
legend.fancybox : true
legend.columnspacing : 1.0
legend.handletextpad : 0.2
lines.linewidth : 2
image.cmap : afmhot
image.interpolation : nearest
image.origin : lower
savefig.facecolor : none
figure.autolayout : True
errorbar.capsize : 2
'''

    # variant: ticks pointing inwards
    styles['goose-tick-in.mplstyle'] = '''
xtick.direction : in
ytick.direction : in
'''

    # variant: ticks and spines only on the lower-left
    styles['goose-tick-lower.mplstyle'] = '''
xtick.top : False
ytick.right : False
axes.spines.top : False
axes.spines.right : False
'''

    # LaTeX variant: use an installed serif font that mimics LaTeX, if one is available
    if find_latex_font_serif() is not None:
        styles['goose-latex.mplstyle'] = r'''
font.family : serif
font.serif : {serif:s}
font.weight : bold
font.size : 18
text.usetex : true
text.latex.preamble : \usepackage{{amsmath}},\usepackage{{amsfonts}},\usepackage{{amssymb}},\usepackage{{bm}}
'''.format(serif=find_latex_font_serif())
    else:
        # NOTE(review): this branch is not passed through .format(), so the doubled braces
        # "{{...}}" are written literally to the style file -- verify that this is intended
        styles['goose-latex.mplstyle'] = r'''
font.family : serif
font.weight : bold
font.size : 18
text.usetex : true
text.latex.preamble : \usepackage{{amsmath}},\usepackage{{amsfonts}},\usepackage{{amssymb}},\usepackage{{bm}}
'''

    # write style definitions
    # -----------------------

    # directory name where the styles are stored
    dirname = os.path.abspath(os.path.join(matplotlib.get_configdir(), 'stylelib'))

    # make directory if it does not yet exist
    if not os.path.isdir(dirname): os.makedirs(dirname)

    # write all styles
    for fname, style in styles.items():
        open(os.path.join(dirname, fname),'w').write(style)
# ==================================================================================================
def set_decade_lims(axis=None, direction=None):
    r'''
    Round the axis limits outwards to whole decades (floor the lower limit,
    ceil the upper limit, both in powers of ten).

    :options:

        **axis** ([``plt.gca()``] | ...)
            Specify the axis to which to apply the limits.

        **direction** ([``None``] | ``'x'`` | ``'y'``)
            Limit the application to a certain direction (default: both).
    '''

    # default to the current axis
    if axis is None:
        axis = plt.gca()

    # x-axis
    if direction is None or direction == 'x':
        lo, hi = axis.get_xlim()
        lo = 10 ** (np.floor(np.log10(lo)))
        hi = 10 ** (np.ceil (np.log10(hi)))
        axis.set_xlim([lo, hi])

    # y-axis
    if direction is None or direction == 'y':
        lo, hi = axis.get_ylim()
        lo = 10 ** (np.floor(np.log10(lo)))
        hi = 10 ** (np.ceil (np.log10(hi)))
        axis.set_ylim([lo, hi])
# ==================================================================================================
def scale_lim(lim, factor=1.05):
    r'''
    Scale limits to be 5% wider, to have a nice plot.

    :arguments:

        **lim** (``<list>`` | ``<str>``)
            The limits. May be a string "[...,...]", which is converted to a list.

    :options:

        **factor** ([``1.05``] | ``<float>``)
            Scale factor.
    '''

    # convert string "[...,...]" to a list
    # NOTE(review): eval executes arbitrary code -- only pass trusted strings here
    if type(lim) == str:
        lim = eval(lim)

    # widen symmetrically around the centre
    # NB: when a list is passed, it is modified in place (and also returned)
    span   = lim[1] - lim[0]
    margin = (factor - 1.) / 2. * span
    lim[0] -= margin
    lim[1] += margin

    return lim
# ==================================================================================================
def abs2rel_x(x, axis=None):
    r'''
    Transform absolute x-coordinates to relative x-coordinates. Relative coordinates correspond
    to a fraction of the relevant axis. Be sure to set the limits and scale before calling this
    function!

    :arguments:

        **x** (``float``, ``list``)
            Absolute coordinates.

    :options:

        **axis** ([``plt.gca()``] | ...)
            Specify the axis to which to apply the limits.

    :returns:

        **x** (``float``, ``list``)
            Relative coordinates.
    '''

    # get current axis
    if axis is None:
        axis = plt.gca()

    # get current limits
    xmin, xmax = axis.get_xlim()

    # transform
    # (the broad try/except falls back to the scalar formula when "x" is not iterable;
    # "None" entries of an iterable are passed through untouched)
    # - log scale: interpolate the fraction in log10-space
    if axis.get_xscale() == 'log':
        try   : return [(np.log10(i)-np.log10(xmin))/(np.log10(xmax)-np.log10(xmin)) if i is not None else i for i in x]
        except: return (np.log10(x)-np.log10(xmin))/(np.log10(xmax)-np.log10(xmin))
    # - normal scale: plain linear fraction
    else:
        try   : return [(i-xmin)/(xmax-xmin) if i is not None else i for i in x]
        except: return (x-xmin)/(xmax-xmin)
# ==================================================================================================
def abs2rel_y(y, axis=None):
    r'''
    Transform absolute y-coordinates to relative y-coordinates. Relative coordinates correspond
    to a fraction of the relevant axis. Be sure to set the limits and scale before calling this
    function!

    :arguments:

        **y** (``float``, ``list``)
            Absolute coordinates.

    :options:

        **axis** ([``plt.gca()``] | ...)
            Specify the axis to which to apply the limits.

    :returns:

        **y** (``float``, ``list``)
            Relative coordinates.
    '''

    # get current axis
    if axis is None:
        axis = plt.gca()

    # get current limits
    ymin, ymax = axis.get_ylim()

    # transform
    # (the broad try/except falls back to the scalar formula when "y" is not iterable;
    # "None" entries of an iterable are passed through untouched)
    # - log scale: interpolate the fraction in log10-space
    # BUG FIX: this used to test axis.get_xscale() (the x-axis scale) when transforming
    # y-coordinates; the y-axis scale is the relevant one here
    if axis.get_yscale() == 'log':
        try   : return [(np.log10(i)-np.log10(ymin))/(np.log10(ymax)-np.log10(ymin)) if i is not None else i for i in y]
        except: return (np.log10(y)-np.log10(ymin))/(np.log10(ymax)-np.log10(ymin))
    # - normal scale: plain linear fraction
    else:
        try   : return [(i-ymin)/(ymax-ymin) if i is not None else i for i in y]
        except: return (y-ymin)/(ymax-ymin)
# ==================================================================================================
def rel2abs_x(x, axis=None):
    r'''
    Transform relative x-coordinates to absolute x-coordinates. Relative coordinates correspond
    to a fraction of the relevant axis. Be sure to set the limits and scale before calling this
    function!

    :arguments:

        **x** (``float``, ``list``)
            Relative coordinates.

    :options:

        **axis** ([``plt.gca()``] | ...)
            Specify the axis to which to apply the limits.

    :returns:

        **x** (``float``, ``list``)
            Absolute coordinates.
    '''

    # get current axis
    if axis is None:
        axis = plt.gca()

    # get current limits
    xmin, xmax = axis.get_xlim()

    # transform
    # (the broad try/except falls back to the scalar formula when "x" is not iterable;
    # "None" entries of an iterable are passed through untouched)
    # - log scale: interpolate in log10-space
    if axis.get_xscale() == 'log':
        try   : return [10.**(np.log10(xmin)+i*(np.log10(xmax)-np.log10(xmin))) if i is not None else i for i in x]
        except: return 10.**(np.log10(xmin)+x*(np.log10(xmax)-np.log10(xmin)))
    # - normal scale: plain linear interpolation
    else:
        try   : return [xmin+i*(xmax-xmin) if i is not None else i for i in x]
        except: return xmin+x*(xmax-xmin)
# ==================================================================================================
def rel2abs_y(y, axis=None):
    r'''
    Transform relative y-coordinates to absolute y-coordinates. Relative coordinates correspond
    to a fraction of the relevant axis. Be sure to set the limits and scale before calling this
    function!

    :arguments:

        **y** (``float``, ``list``)
            Relative coordinates.

    :options:

        **axis** ([``plt.gca()``] | ...)
            Specify the axis to which to apply the limits.

    :returns:

        **y** (``float``, ``list``)
            Absolute coordinates.
    '''

    # get current axis
    if axis is None:
        axis = plt.gca()

    # get current limits
    ymin, ymax = axis.get_ylim()

    # transform
    # (the broad try/except falls back to the scalar formula when "y" is not iterable;
    # "None" entries of an iterable are passed through untouched)
    # - log scale: interpolate in log10-space
    # BUG FIX: this used to test axis.get_xscale() (the x-axis scale) when transforming
    # y-coordinates; the y-axis scale is the relevant one here
    if axis.get_yscale() == 'log':
        try   : return [10.**(np.log10(ymin)+i*(np.log10(ymax)-np.log10(ymin))) if i is not None else i for i in y]
        except: return 10.**(np.log10(ymin)+y*(np.log10(ymax)-np.log10(ymin)))
    # - normal scale: plain linear interpolation
    else:
        try   : return [ymin+i*(ymax-ymin) if i is not None else i for i in y]
        except: return ymin+y*(ymax-ymin)
# ==================================================================================================
def subplots(scale_x=None, scale_y=None, scale=None, **kwargs):
    r'''
    Run ``matplotlib.pyplot.subplots`` with ``figsize`` set to the correct multiple of the
    default.

    :additional options:

        **scale, scale_x, scale_y** (``<float>``)
            Scale the figure-size (along one of the dimensions).
    '''

    # an explicitly given figure-size takes precedence: forward the call unchanged
    if 'figsize' in kwargs:
        return plt.subplots(**kwargs)

    # start from the default figure-size
    width, height = mpl.rcParams['figure.figsize']

    # apply the scale factors
    if scale is not None:
        width  *= scale
        height *= scale
    if scale_x is not None:
        width *= scale_x
    if scale_y is not None:
        height *= scale_y

    # grow the figure proportionally with the number of sub-plots
    nrows = kwargs.pop('nrows', 1)
    ncols = kwargs.pop('ncols', 1)
    width  = ncols * width
    height = nrows * height

    return plt.subplots(nrows=nrows, ncols=ncols, figsize=(width, height), **kwargs)
# ==================================================================================================
# ==================================================================================================
def text(x, y, text, units='absolute', axis=None, **kwargs):
    r'''
    Plot a text.

    :arguments:

        **x, y** (``float``)
            Coordinates.

        **text** (``str``)
            Text to plot.

    :options:

        **units** ([``'absolute'``] | ``'relative'``)
            The type of units in which the coordinates are specified. Relative coordinates
            correspond to a fraction of the relevant axis. If you use relative coordinates, be
            sure to set the limits and scale before calling this function!

        ...
            Any ``plt.text(...)`` option.

    :returns:

        The handle of the ``plt.text(...)`` command.
    '''

    # default to the current axis
    if axis is None:
        axis = plt.gca()

    # convert relative -> absolute coordinates
    if units.lower() == 'relative':
        x = rel2abs_x(x, axis)
        y = rel2abs_y(y, axis)

    return axis.text(x, y, text, **kwargs)
# ==================================================================================================
def diagonal_powerlaw(exp, ll=None, lr=None, tl=None, tr=None, width=None, height=None, plot=False, **kwargs):
    r'''
    Set the limits such that a power-law with a certain exponent lies on the diagonal.

    :arguments:

        **exp** (``<float>``)
            The power-law exponent.

        **ll, lr, tl, tr** (``<list>``)
            Coordinates of the lower-left, or the lower-right, or the top-left, or the top-right
            corner (specify exactly one).

        **width, height** (``<float>``)
            Width or the height (specify exactly one).

    :options:

        **axis** ([``plt.gca()``] | ...)
            Specify the axis to which to apply the limits.

        **plot** ([``False``] | ``True``)
            Plot the diagonal.

        ...
            Any ``plt.plot(...)`` option.

    :returns:

        The handle of the ``plt.plot(...)`` command (if any).
    '''

    axis = kwargs.pop('axis', plt.gca())

    # work in log-space, where the power-law is a straight line
    # (exactly one of "width"/"height" must be truthy)
    if width and not height: width = np.log(width )
    elif height and not width : height = np.log(height)
    else: raise IOError('Specify "width" or "height"')

    # convert the one specified corner to log-space
    if ll and not lr and not tl and not tr: ll = [ np.log(ll[0]), np.log(ll[1]) ]
    elif lr and not ll and not tl and not tr: lr = [ np.log(lr[0]), np.log(lr[1]) ]
    elif tl and not lr and not ll and not tr: tl = [ np.log(tl[0]), np.log(tl[1]) ]
    elif tr and not lr and not tl and not ll: tr = [ np.log(tr[0]), np.log(tr[1]) ]
    else: raise IOError('Specify "ll" or "lr" or "tl" or "tr"')

    axis.set_xscale('log')
    axis.set_yscale('log')

    # derive the missing dimension such that the diagonal has slope "exp" in log-space
    if width : height = width * np.abs(exp)
    elif height: width = height / np.abs(exp)

    # set the limits, anchored at the specified corner (sorted() keeps min/max in order)
    if ll:
        axis.set_xlim(sorted([np.exp(ll[0]), np.exp(ll[0]+width )]))
        axis.set_ylim(sorted([np.exp(ll[1]), np.exp(ll[1]+height)]))
    elif lr:
        axis.set_xlim(sorted([np.exp(lr[0]), np.exp(lr[0]-width )]))
        axis.set_ylim(sorted([np.exp(lr[1]), np.exp(lr[1]+height)]))
    elif tl:
        axis.set_xlim(sorted([np.exp(tl[0]), np.exp(tl[0]+width )]))
        axis.set_ylim(sorted([np.exp(tl[1]), np.exp(tl[1]-height)]))
    elif tr:
        axis.set_xlim(sorted([np.exp(tr[0]), np.exp(tr[0]-width )]))
        axis.set_ylim(sorted([np.exp(tr[1]), np.exp(tr[1]-height)]))

    # optionally draw the diagonal itself, corner to corner in relative coordinates
    if plot:
        if exp > 0: return plot_powerlaw(exp, 0., 0., 1., **kwargs)
        else      : return plot_powerlaw(exp, 0., 1., 1., **kwargs)
# ==================================================================================================
def annotate_powerlaw(text, exp, startx, starty, width=None, rx=0.5, ry=0.5, **kwargs):
    r'''
    Add a label to the middle of a power-law annotation (see ``goosempl.plot_powerlaw``).

    :arguments:

        **text** (``str``)
            The label text.

        **exp** (``float``)
            The power-law exponent.

        **startx, starty** (``float``)
            Start coordinates.

    :options:

        **width, height, endx, endy** (``float``)
            Definition of the end coordinate (only one of these options is needed).

        **rx, ry** (``float``)
            Shift in x- and y-direction w.r.t. the default coordinates.

        **units** ([``'relative'``] | ``'absolute'``)
            The type of units in which the coordinates are specified. Relative coordinates
            correspond to a fraction of the relevant axis. If you use relative coordinates, be
            sure to set the limits and scale before calling this function!

        **axis** ([``plt.gca()``] | ...)
            Specify the axis to which to apply the limits.

        ...
            Any ``plt.text(...)`` option.

    :returns:

        The handle of the ``plt.text(...)`` command.
    '''

    # get options/defaults
    endx   = kwargs.pop('endx'  , None      )
    endy   = kwargs.pop('endy'  , None      )
    height = kwargs.pop('height', None      )
    units  = kwargs.pop('units' , 'relative')
    axis   = kwargs.pop('axis'  , plt.gca() )

    # check: a power-law is only a straight line on a log-log plot
    if axis.get_xscale() != 'log' or axis.get_yscale() != 'log':
        raise IOError('This function only works on a log-log scale, where the power-law is a straight line')

    # apply width/height: convert to a single end coordinate (the other one is derived below)
    if width is not None:
        endx = startx + width
        endy = None
    elif height is not None:
        if   exp >  0: endy = starty + height
        elif exp == 0: endy = starty
        else         : endy = starty - height
        endx = None

    # transform relative -> absolute coordinates
    if units.lower() == 'relative':
        [startx, endx] = rel2abs_x([startx, endx], axis)
        [starty, endy] = rel2abs_y([starty, endy], axis)

    # determine multiplication constant: y = const * x ** exp through the start point
    const = starty / ( startx**exp )

    # complete the missing end x/y-coordinate
    if endx is not None: endy = const * endx**exp
    else               : endx = ( endy / const )**( 1/exp )

    # label position: fraction (rx, ry) along the line, interpolated in log-space
    x = 10. ** ( np.log10(startx) + rx * ( np.log10(endx) - np.log10(startx) ) )
    y = 10. ** ( np.log10(starty) + ry * ( np.log10(endy) - np.log10(starty) ) )

    # plot
    return axis.text(x, y, text, **kwargs)
# ==================================================================================================
def plot_powerlaw(exp, startx, starty, width=None, **kwargs):
    r'''
    Plot a power-law: a straight line on a log-log scale.

    :arguments:

        **exp** (``float``)
            The power-law exponent.

        **startx, starty** (``float``)
            Start coordinates.

    :options:

        **width, height, endx, endy** (``float``)
            Definition of the end coordinate (only one of these options is needed).

        **units** ([``'relative'``] | ``'absolute'``)
            The type of units in which the coordinates are specified. Relative coordinates
            correspond to a fraction of the relevant axis. If you use relative coordinates, be sure
            to set the limits and scale before calling this function!

        **axis** ([``plt.gca()``] | ...)
            Specify the axis to which to apply the limits.

        ...
            Any ``plt.plot(...)`` option.

    :returns:

        The handle of the ``plt.plot(...)`` command.
    '''

    # get options/defaults
    endx   = kwargs.pop('endx'  , None      )
    endy   = kwargs.pop('endy'  , None      )
    height = kwargs.pop('height', None      )
    units  = kwargs.pop('units' , 'relative')
    axis   = kwargs.pop('axis'  , None      )

    # evaluate the default axis lazily: plt.gca() may create a figure as a side effect,
    # so only call it when the caller did not supply an axis
    if axis is None:
        axis = plt.gca()

    # a power-law is a straight line only on double-log axes
    if axis.get_xscale() != 'log' or axis.get_yscale() != 'log':
        raise IOError('This function only works on a log-log scale, where the power-law is a straight line')

    # convert "width"/"height" to an end coordinate
    if width is not None:
        endx = startx + width
        endy = None
    elif height is not None:
        if   exp >  0: endy = starty + height
        elif exp == 0: endy = starty
        else:          endy = starty - height
        endx = None
    # ROBUSTNESS: without any end specification the code below would fail with an
    # opaque TypeError on None; fail early with a clear message instead
    elif endx is None and endy is None:
        raise IOError('Specify one of "width", "height", "endx" or "endy"')

    # transform relative -> absolute coordinates
    if units.lower() == 'relative':
        [startx, endx] = rel2abs_x([startx, endx], axis)
        [starty, endy] = rel2abs_y([starty, endy], axis)

    # multiplication constant of the power-law: y = const * x ** exp
    const = starty / (startx ** exp)

    # complete the end coordinate from the power-law
    if endx is not None: endy = const * endx ** exp
    else:                endx = (endy / const) ** (1 / exp)

    # plot
    return axis.plot([startx, endx], [starty, endy], **kwargs)
# ==================================================================================================
def grid_powerlaw(exp, insert=0, skip=0, end=-1, step=0, axis=None, **kwargs):
    r'''
    Draw a power-law grid: a grid that respects a certain power-law exponent. The grid-lines start from
    the positions of the ticks.

    :arguments:

        **exp** (``float``)
            The power-law exponent (``0`` draws horizontal lines).

    :options:

        **insert** (``<int>``)
            Insert extra lines in between the default lines set by the tick positions.

        **skip, end, step** (``<int>``)
            Select from the lines based on ``coor = coor[skip:end:step]``.

        **axis** ([``plt.gca()``] | ...)
            Specify the axis to which to apply the limits.

        ...
            Any ``plt.plot(...)`` option.

    :returns:

        The handle of the ``plt.plot(...)`` command.
    '''

    # default axis
    if axis is None: axis = plt.gca()

    # default plot settings: thin dashed black lines
    kwargs.setdefault('color'    , 'k' )
    kwargs.setdefault('linestyle', '--')
    kwargs.setdefault('linewidth', 1   )

    # check: only on double-log axes is a power-law a straight line
    if axis.get_xscale() != 'log' or axis.get_yscale() != 'log':
        raise IOError('This function only works on a log-log scale, where the power-law is a straight line')

    # zero-exponent: draw horizontal lines
    if exp == 0:

        # y-coordinate of the start positions, in relative (axis-fraction) coordinates
        starty = abs2rel_y(axis.get_yticks(), axis=axis)

        # insert extra, interpolated, coordinates in between the tick positions
        if insert > 0:
            n = len(starty)
            x = np.linspace(0,1,n+(n-1)*int(insert))
            xp = np.linspace(0,1,n)
            starty = np.interp(x, xp, starty)

        # select a subset of the lines
        starty = starty[int(skip):int(end):int(1+step)]

        # horizontal lines span the full x-range (relative coordinates 0..1)
        endy = starty
        startx = np.zeros((len(starty)))
        endx = np.ones ((len(starty)))

    # all other exponents
    else:

        # get the axis' size in real coordinates
        # - get the limits
        xmin, xmax = axis.get_xlim()
        ymin, ymax = axis.get_ylim()
        # - compute the size in both directions (in decades, since the axes are log-scaled)
        deltax = np.log10(xmax) - np.log10(xmin)
        deltay = np.log10(ymax) - np.log10(ymin)

        # convert the exponent in real coordinates to an exponent in relative coordinates:
        # the slope of the lines as drawn on the (square, normalized) canvas
        b = np.abs(exp) * deltax / deltay

        # x-coordinate of the start positions (relative coordinates)
        startx = abs2rel_x(axis.get_xticks(), axis=axis)

        # compute how many lines need to be prepended left of the axis, so that the
        # sloped lines still cover the whole canvas
        Dx = startx[1] - startx[0]
        nneg = int(np.floor(1./(b*Dx))) - 1

        # add extra to be sure (inserted lines tighten the spacing)
        if insert > 0:
            nneg += 1

        # prepend start positions left of the axis
        if nneg > 0:
            startx = np.hstack(( startx[0]+np.cumsum(-Dx * np.ones((nneg)))[::-1], startx ))

        # insert extra, interpolated, coordinates in between the start positions
        if insert > 0:
            n = len(startx)
            x = np.linspace(0,1,n+(n-1)*int(insert))
            xp = np.linspace(0,1,n)
            startx = np.interp(x, xp, startx)

        # select a subset of the lines
        if step > 0:
            startx = startx[int(skip)::int(1+step)]

        # x-coordinate of the end of the lines: each line spans the full y-range,
        # so its x-extent in relative coordinates is 1/b
        endx = startx + 1/b

        # y-coordinate of the start and the end of the lines:
        # bottom-to-top for positive exponents, top-to-bottom otherwise
        if exp > 0:
            starty = np.zeros((len(startx)))
            endy = np.ones ((len(startx)))
        else:
            starty = np.ones ((len(startx)))
            endy = np.zeros((len(startx)))

    # convert from relative back to real (data) coordinates
    startx = rel2abs_x(startx, axis)
    endx = rel2abs_x(endx , axis)
    starty = rel2abs_y(starty, axis)
    endy = rel2abs_y(endy , axis)

    # plot all lines at once: each column of the 2xN coordinate arrays is one line
    lines = axis.plot(np.vstack(( startx, endx )), np.vstack(( starty, endy )), **kwargs)

    # label only the first line, to suppress duplicate legend entries
    plt.setp(lines[1:], label="_")

    # return handles
    return lines
# ==================================================================================================
def histogram_bin_edges_minwidth(min_width, bins):
    r'''
    Merge bins with their right-neighbour until every bin is at least ``min_width`` wide.

    :arguments:

        **bins** (``<array_like>``)
            The bin-edges.

        **min_width** (``<float>``)
            The minimum bin width.
    '''

    # nothing requested: pass the edges through untouched
    if min_width is None or min_width is False:
        return bins

    # repeatedly merge the first too-narrow bin, until none is left
    while True:

        narrow = np.where(np.diff(bins) < min_width)[0]

        # all bins wide enough: done
        if narrow.size == 0:
            return bins

        i = narrow[0]

        if i + 1 == len(bins) - 1:
            # the last bin is too narrow: merge it leftwards, keeping the outer edge
            bins = np.hstack((bins[:i], bins[-1]))
        else:
            # drop the edge between bin "i" and its right neighbour
            bins = np.hstack((bins[:i + 1], bins[i + 2:]))
# ==================================================================================================
def histogram_bin_edges_mincount(data, min_count, bins):
    r'''
    Merge bins with their right-neighbour until every bin holds at least ``min_count`` data-points.

    :arguments:

        **data** (``<array_like>``)
            Input data. The histogram is computed over the flattened array.

        **bins** (``<array_like>`` | ``<int>``)
            The bin-edges (or the number of bins, automatically converted to equal-sized bins).

        **min_count** (``<int>``)
            The minimum number of data-points per bin.
    '''

    # nothing requested: pass the edges through untouched
    if min_count is None or min_count is False:
        return bins

    # sanity check on the requested count
    if type(min_count) != int: raise IOError('"min_count" must be an integer number')

    # repeatedly merge the first under-populated bin, until none is left
    while True:

        counts, _ = np.histogram(data, bins=bins, density=False)
        sparse = np.where(counts < min_count)[0]

        # all bins sufficiently populated: done
        if sparse.size == 0:
            return bins

        i = sparse[0]

        if i + 1 == len(counts):
            # the last bin is under-populated: merge it leftwards, keeping the outer edge
            bins = np.hstack((bins[:i], bins[-1]))
        else:
            # drop the edge between bin "i" and its right neighbour
            bins = np.hstack((bins[:i + 1], bins[i + 2:]))
# ==================================================================================================
def histogram_bin_edges(data, bins=10, mode='equal', min_count=None, integer=False, remove_empty_edges=True, min_width=None):
    r'''
    Determine bin-edges.

    :arguments:

        **data** (``<array_like>``)
            Input data. The histogram is computed over the flattened array.

    :options:

        **bins** ([``10``] | ``<int>``)
            The number of bins.

        **mode** ([``'equal'``] | ``<str>``)
            Mode with which to compute the bin-edges:
            * ``'equal'``: each bin has equal width.
            * ``'log'``: logarithmic spacing.
            * ``'uniform'``: uniform number of data-points per bin.

        **min_count** (``<int>``)
            The minimum number of data-points per bin.

        **min_width** (``<float>``)
            The minimum width of each bin.

        **integer** ([``False``] | ``True``)
            If ``True``, bins not encompassing an integer are removed
            (e.g. a bin with edges ``[1.1, 1.9]`` is removed, but ``[0.9, 1.1]`` is not removed).

        **remove_empty_edges** ([``True``] | ``False``)
            Remove empty bins at the beginning or the end.

    :returns:

        **bin_edges** (``<array of dtype float>``)
            The edges to pass into histogram.
    '''

    # determine the bin-edges
    if mode == 'equal':

        bin_edges = np.linspace(np.min(data), np.max(data), bins + 1)

    elif mode == 'log':

        bin_edges = np.logspace(np.log10(np.min(data)), np.log10(np.max(data)), bins + 1)

    elif mode == 'uniform':

        # explicit bin-edges make no sense in this mode: they are its output
        if hasattr(bins, "__len__"):
            raise IOError('Only the number of bins can be specified')

        # use the minimum count to estimate the number of bins
        if min_count is not None and min_count is not False:
            if type(min_count) != int: raise IOError('"min_count" must be an integer number')
            bins = int(np.floor(float(len(data)) / float(bins if False else min_count)))

        # number of data-points in each bin (equal for each)
        count = int(np.floor(float(len(data)) / float(bins))) * np.ones(bins, dtype='int')

        # increase the number of data-points by one in as many bins as needed,
        # such that the total fits the total number of data-points
        # BUG FIX: "np.int" was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin "int" is the documented replacement
        count[np.linspace(0, bins - 1, len(data) - np.sum(count)).astype(int)] += 1

        # split the data: "idx" holds the index of the data-point at each bin-edge
        idx = np.empty((bins + 1), dtype='int')
        idx[0 ] = 0
        idx[1:] = np.cumsum(count)
        idx[-1] = len(data) - 1

        # determine the bin-edges ("unique" collapses duplicates caused by repeated data)
        bin_edges = np.unique(np.sort(data)[idx])

    else:

        raise IOError('Unknown option')

    # remove empty starting and ending bin (related to an unfortunate choice of bin-edges)
    if remove_empty_edges:
        N, _ = np.histogram(data, bins=bin_edges, density=False)
        idx = np.min(np.where(N > 0)[0])
        jdx = np.max(np.where(N > 0)[0])
        bin_edges = bin_edges[(idx):(jdx + 2)]

    # merge bins with too few data-points (if needed)
    bin_edges = histogram_bin_edges_mincount(data, min_count=min_count, bins=bin_edges)

    # merge bins that have too small of a width
    bin_edges = histogram_bin_edges_minwidth(min_width=min_width, bins=bin_edges)

    # select only bins that encompass an integer (and retain the original bounds)
    if integer:
        idx = np.where(np.diff(np.floor(bin_edges)) >= 1)[0]
        idx = np.unique(np.hstack((0, idx, len(bin_edges) - 1)))
        bin_edges = bin_edges[idx]

    # return
    return bin_edges
# ==================================================================================================
def histogram(data, return_edges=True, **kwargs):
    r'''
    Compute a histogram.

    See `numpy.histogram <https://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html>`_

    :extra options:

        **return_edges** ([``True``] | [``False``])
            Return the bin edges if set to ``True``, return their midpoints otherwise.
    '''

    # delegate the actual binning to NumPy
    counts, bin_edges = np.histogram(data, **kwargs)

    if return_edges:
        return counts, bin_edges

    # convert bin edges to the mid-point of each bin
    midpoints = np.diff(bin_edges) / 2. + bin_edges[:-1]

    return counts, midpoints
# ==================================================================================================
def histogram_cumulative(data, **kwargs):
    r'''
    Compute a cumulative histogram.

    See `numpy.histogram <https://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html>`_

    :extra options:

        **return_edges** ([``True``] | [``False``])
            Return the bin edges if set to ``True``, return their midpoints otherwise.

        **normalize** ([``False``] | ``True``)
            Normalize such that the final probability is one. In this case the function returns the
            (binned) cumulative probability density.
    '''

    # extract local options before forwarding the rest to NumPy
    edges_wanted = kwargs.pop('return_edges', True)
    normalize    = kwargs.pop('normalize', False)

    counts, edges = np.histogram(data, **kwargs)

    # accumulate the per-bin counts
    counts = np.cumsum(counts)

    if normalize:
        counts = counts / counts[-1]

    # optionally convert edges to bin mid-points
    if not edges_wanted:
        edges = np.diff(edges) / 2. + edges[:-1]

    return counts, edges
# ==================================================================================================
def hist(P, edges, **kwargs):
    r'''
    Plot a histogram: one rectangular patch per bin, of height ``P[i]`` between
    ``edges[i]`` and ``edges[i+1]``.
    '''

    from matplotlib.collections import PatchCollection
    from matplotlib.patches import Polygon

    # extract local options
    axis      = kwargs.pop('axis'     , plt.gca())
    cindex    = kwargs.pop('cindex'   , None     )
    autoscale = kwargs.pop('autoscale', True     )

    # set defaults
    kwargs.setdefault('edgecolor', 'k')

    # no color-index -> transparent faces
    if cindex is None:
        kwargs.setdefault('facecolor', (0., 0., 0., 0.))

    # one rectangle (as Polygon) per bin, corners listed counter-clockwise
    polygons = [
        Polygon(np.array([[xl, 0.], [xu, 0.], [xu, p], [xl, p]]))
        for p, xl, xu in zip(P, edges[:-1], edges[1:])
    ]

    # bundle the patches into a single matplotlib collection
    collection = PatchCollection(polygons, **kwargs)

    # map the color-index onto the patches
    if cindex is not None:
        collection.set_array(cindex)

    axis.add_collection(collection)

    # rescale the axes manually (Collections do not update the auto-limits)
    if autoscale:
        xlim = [edges[0], edges[-1]]
        ylim = [0, np.max(P)]
        # 10% margin on either side
        dx = .1 * (xlim[1] - xlim[0])
        dy = .1 * (ylim[1] - ylim[0])
        axis.set_xlim([xlim[0] - dx, xlim[1] + dx])
        axis.set_ylim([ylim[0] - dy, ylim[1] + dy])

    return collection
# ==================================================================================================
def cdf(data, mode='continuous', **kwargs):
    '''
    Return the empirical cumulative density of ``data``.

    :arguments:

        **data** (``<numpy.ndarray>``)
            Input data, to plot the distribution for.

    :returns:

        **P** (``<numpy.ndarray>``)
            Cumulative probability.

        **x** (``<numpy.ndarray>``)
            Data points.
    '''

    # probability runs linearly from 0 to 1 over the sorted data points
    probability = np.linspace(0.0, 1.0, len(data))

    return (probability, np.sort(data))
# ==================================================================================================
def patch(*args, **kwargs):
    '''
    Add patches to a plot. The color of the patches is indexed according to a specified color-index.

    :example:

        Plot a finite element mesh: the outline of the undeformed configuration, and the deformed
        configuration for which the elements get a color e.g. based on stress::

            import matplotlib.pyplot as plt
            import goosempl as gplt

            fig,ax = plt.subplots()

            p = gplt.patch(coor=coor+disp,conn=conn,axis=ax,cindex=stress,cmap='YlOrRd',edgecolor=None)
            _ = gplt.patch(coor=coor     ,conn=conn,axis=ax)

            cbar = fig.colorbar(p,axis=ax,aspect=10)

            plt.show()

    :arguments - option 1/2:

        **patches** (``<list>``)
            List with patch objects. Can be replaced by specifying ``coor`` and ``conn``.

    :arguments - option 2/2:

        **coor** (``<numpy.ndarray>`` | ``<list>`` (nested))
            Matrix with on each row the coordinates (positions) of each node.

        **conn** (``<numpy.ndarray>`` | ``<list>`` (nested))
            Matrix with on each row the node numbers (rows in ``coor``) which form an element (patch).

    :options:

        **cindex** (``<numpy.ndarray>``)
            Array with, for each patch, the value that should be indexed to a color.

        **axis** (``<matplotlib>``)
            Specify an axis to include to plot in. By default the current axis is used.

        **autoscale** ([``True``] | ``False``)
            Automatically update the limits of the plot (currently automatic limits of Collections
            are not supported by matplotlib).

    :recommended options:

        **cmap** (``<str>`` | ...)
            Specify a colormap.

        **linewidth** (``<float>``)
            Width of the edges.

        **edgecolor** (``<str>`` | ...)
            Color of the edges.

        **clim** (``(<float>,<float>)``)
            Lower and upper limit of the color-axis.

    :returns:

        **handle** (``<matplotlib>``)
            Handle of the patch objects.

    .. seealso::

        * `matplotlib example <http://matplotlib.org/examples/api/patch_collection.html>`_.
    '''

    from matplotlib.collections import PatchCollection
    from matplotlib.patches import Polygon

    # check dependent options
    # NOTE(review): this guard makes "option 1" (patches only, no mesh) unreachable,
    # contradicting the docstring -- kept as-is to preserve the public behaviour
    if ('coor' not in kwargs or 'conn' not in kwargs):
        raise IOError('Specify both "coor" and "conn"')

    # extract local options
    axis      = kwargs.pop('axis'     , plt.gca())
    cindex    = kwargs.pop('cindex'   , None     )
    coor      = kwargs.pop('coor'     , None     )
    conn      = kwargs.pop('conn'     , None     )
    autoscale = kwargs.pop('autoscale', True     )

    # set defaults
    kwargs.setdefault('edgecolor', 'k')

    # no color-index -> set transparent
    if cindex is None:
        kwargs.setdefault('facecolor', (0., 0., 0., 0.))

    # convert mesh -> list of Polygons
    if coor is not None and conn is not None:
        poly = [Polygon(coor[iconn, :]) for iconn in conn]
        # BUG FIX: was "args = tuple(poly, *args)", which raises
        # "TypeError: tuple expected at most 1 argument" whenever extra patches are passed
        # positionally; combine the generated polygons with any user-supplied patches instead
        args = tuple(poly) + args

    # convert patches -> matplotlib-objects
    p = PatchCollection(args, **kwargs)

    # add colors to patches
    if cindex is not None:
        p.set_array(cindex)

    # add patches to axis
    axis.add_collection(p)

    # rescale the axes manually (Collections do not update the auto-limits)
    if autoscale:
        # - get limits
        xlim = [np.min(coor[:, 0]), np.max(coor[:, 0])]
        ylim = [np.min(coor[:, 1]), np.max(coor[:, 1])]
        # - set limits +/- 10% extra margin
        axis.set_xlim([xlim[0] - .1 * (xlim[1] - xlim[0]), xlim[1] + .1 * (xlim[1] - xlim[0])])
        axis.set_ylim([ylim[0] - .1 * (ylim[1] - ylim[0]), ylim[1] + .1 * (ylim[1] - ylim[0])])

    return p
# ==================================================================================================
|
tdegeus/GooseMPL | GooseMPL/__init__.py | diagonal_powerlaw | python | def diagonal_powerlaw(exp, ll=None, lr=None, tl=None, tr=None, width=None, height=None, plot=False, **kwargs):
r'''
Set the limits such that a power-law with a certain exponent lies on the diagonal.
:arguments:
**exp** (``<float>``)
The power-law exponent.
**ll, lr, tl, tr** (``<list>``)
Coordinates of the lower-left, or the lower-right, or the top-left, or the top-right corner.
**width, height** (``<float>``)
Width or the height.
:options:
**axis** ([``plt.gca()``] | ...)
Specify the axis to which to apply the limits.
**plot** ([``False``] | ``True``)
Plot the diagonal.
...
Any ``plt.plot(...)`` option.
:returns:
The handle of the ``plt.plot(...)`` command (if any).
'''
axis = kwargs.pop('axis', plt.gca())
if width and not height: width = np.log(width )
elif height and not width : height = np.log(height)
else: raise IOError('Specify "width" or "height"')
if ll and not lr and not tl and not tr: ll = [ np.log(ll[0]), np.log(ll[1]) ]
elif lr and not ll and not tl and not tr: lr = [ np.log(lr[0]), np.log(lr[1]) ]
elif tl and not lr and not ll and not tr: tl = [ np.log(tl[0]), np.log(tl[1]) ]
elif tr and not lr and not tl and not ll: tr = [ np.log(tr[0]), np.log(tr[1]) ]
else: raise IOError('Specify "ll" or "lr" or "tl" or "tr"')
axis.set_xscale('log')
axis.set_yscale('log')
if width : height = width * np.abs(exp)
elif height: width = height / np.abs(exp)
if ll:
axis.set_xlim(sorted([np.exp(ll[0]), np.exp(ll[0]+width )]))
axis.set_ylim(sorted([np.exp(ll[1]), np.exp(ll[1]+height)]))
elif lr:
axis.set_xlim(sorted([np.exp(lr[0]), np.exp(lr[0]-width )]))
axis.set_ylim(sorted([np.exp(lr[1]), np.exp(lr[1]+height)]))
elif tl:
axis.set_xlim(sorted([np.exp(tl[0]), np.exp(tl[0]+width )]))
axis.set_ylim(sorted([np.exp(tl[1]), np.exp(tl[1]-height)]))
elif tr:
axis.set_xlim(sorted([np.exp(tr[0]), np.exp(tr[0]-width )]))
axis.set_ylim(sorted([np.exp(tr[1]), np.exp(tr[1]-height)]))
if plot:
if exp > 0: return plot_powerlaw(exp, 0., 0., 1., **kwargs)
else : return plot_powerlaw(exp, 0., 1., 1., **kwargs) | r'''
Set the limits such that a power-law with a certain exponent lies on the diagonal.
:arguments:
**exp** (``<float>``)
The power-law exponent.
**ll, lr, tl, tr** (``<list>``)
Coordinates of the lower-left, or the lower-right, or the top-left, or the top-right corner.
**width, height** (``<float>``)
Width or the height.
:options:
**axis** ([``plt.gca()``] | ...)
Specify the axis to which to apply the limits.
**plot** ([``False``] | ``True``)
Plot the diagonal.
...
Any ``plt.plot(...)`` option.
:returns:
The handle of the ``plt.plot(...)`` command (if any). | train | https://github.com/tdegeus/GooseMPL/blob/16e1e06cbcf7131ac98c03ca7251ce83734ef905/GooseMPL/__init__.py#L480-L544 | [
"def plot_powerlaw(exp, startx, starty, width=None, **kwargs):\n r'''\nPlot a power-law.\n\n:arguments:\n\n **exp** (``float``)\n The power-law exponent.\n\n **startx, starty** (``float``)\n Start coordinates.\n\n:options:\n\n **width, height, endx, endy** (``float``)\n Definition of the end coordinate (only on of these options is needed).\n\n **units** ([``'relative'``] | ``'absolute'``)\n The type of units in which the coordinates are specified. Relative coordinates correspond to a\n fraction of the relevant axis. If you use relative coordinates, be sure to set the limits and\n scale before calling this function!\n\n **axis** ([``plt.gca()``] | ...)\n Specify the axis to which to apply the limits.\n\n ...\n Any ``plt.plot(...)`` option.\n\n:returns:\n\n The handle of the ``plt.plot(...)`` command.\n '''\n\n # get options/defaults\n endx = kwargs.pop('endx' , None )\n endy = kwargs.pop('endy' , None )\n height = kwargs.pop('height', None )\n units = kwargs.pop('units' , 'relative')\n axis = kwargs.pop('axis' , plt.gca() )\n\n # check\n if axis.get_xscale() != 'log' or axis.get_yscale() != 'log':\n raise IOError('This function only works on a log-log scale, where the power-law is a straight line')\n\n # apply width/height\n if width is not None:\n\n endx = startx + width\n endy = None\n\n elif height is not None:\n\n if exp > 0: endy = starty + height\n elif exp == 0: endy = starty\n else : endy = starty - height\n\n endx = None\n\n # transform\n if units.lower() == 'relative':\n [startx, endx] = rel2abs_x([startx, endx], axis)\n [starty, endy] = rel2abs_y([starty, endy], axis)\n\n # determine multiplication constant\n const = starty / ( startx**exp )\n\n # get end x/y-coordinate\n if endx is not None: endy = const * endx**exp\n else : endx = ( endy / const )**( 1/exp )\n\n # plot\n return axis.plot([startx, endx], [starty, endy], **kwargs)\n"
] | '''
This module provides some extensions to matplotlib.
:dependencies:
* numpy
* matplotlib
:copyright:
| Tom de Geus
| tom@geus.me
| http://www.geus.me
'''
# ==================================================================================================
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
import os,re,sys
# ==================================================================================================
def find_latex_font_serif():
r'''
Find an available font to mimic LaTeX, and return its name.
'''
import os, re
import matplotlib.font_manager
name = lambda font: os.path.splitext(os.path.split(font)[-1])[0].split(' - ')[0]
fonts = matplotlib.font_manager.findSystemFonts(fontpaths=None, fontext='ttf')
matches = [
r'.*Computer\ Modern\ Roman.*',
r'.*CMU\ Serif.*',
r'.*CMU.*',
r'.*Times.*',
r'.*DejaVu.*',
r'.*Serif.*',
]
for match in matches:
for font in fonts:
if re.match(match,font):
return name(font)
return None
# --------------------------------------------------------------------------------------------------
def copy_style():
r'''
Write all goose-styles to the relevant matplotlib configuration directory.
'''
import os
import matplotlib
# style definitions
# -----------------
styles = {}
styles['goose.mplstyle'] = '''
figure.figsize : 8,6
font.weight : normal
font.size : 16
axes.labelsize : medium
axes.titlesize : medium
xtick.labelsize : small
ytick.labelsize : small
xtick.top : True
ytick.right : True
axes.facecolor : none
axes.prop_cycle : cycler('color',['k', 'r', 'g', 'b', 'y', 'c', 'm'])
legend.fontsize : medium
legend.fancybox : true
legend.columnspacing : 1.0
legend.handletextpad : 0.2
lines.linewidth : 2
image.cmap : afmhot
image.interpolation : nearest
image.origin : lower
savefig.facecolor : none
figure.autolayout : True
errorbar.capsize : 2
'''
styles['goose-tick-in.mplstyle'] = '''
xtick.direction : in
ytick.direction : in
'''
styles['goose-tick-lower.mplstyle'] = '''
xtick.top : False
ytick.right : False
axes.spines.top : False
axes.spines.right : False
'''
if find_latex_font_serif() is not None:
styles['goose-latex.mplstyle'] = r'''
font.family : serif
font.serif : {serif:s}
font.weight : bold
font.size : 18
text.usetex : true
text.latex.preamble : \usepackage{{amsmath}},\usepackage{{amsfonts}},\usepackage{{amssymb}},\usepackage{{bm}}
'''.format(serif=find_latex_font_serif())
else:
styles['goose-latex.mplstyle'] = r'''
font.family : serif
font.weight : bold
font.size : 18
text.usetex : true
text.latex.preamble : \usepackage{{amsmath}},\usepackage{{amsfonts}},\usepackage{{amssymb}},\usepackage{{bm}}
'''
# write style definitions
# -----------------------
# directory name where the styles are stored
dirname = os.path.abspath(os.path.join(matplotlib.get_configdir(), 'stylelib'))
# make directory if it does not yet exist
if not os.path.isdir(dirname): os.makedirs(dirname)
# write all styles
for fname, style in styles.items():
open(os.path.join(dirname, fname),'w').write(style)
# ==================================================================================================
def set_decade_lims(axis=None,direction=None):
r'''
Set limits the the floor/ceil values in terms of decades.
:options:
**axis** ([``plt.gca()``] | ...)
Specify the axis to which to apply the limits.
**direction** ([``None``] | ``'x'`` | ``'y'``)
Limit the application to a certain direction (default: both).
'''
# get current axis
if axis is None:
axis = plt.gca()
# x-axis
if direction is None or direction == 'x':
# - get current limits
MIN,MAX = axis.get_xlim()
# - floor/ceil to full decades
MIN = 10 ** ( np.floor(np.log10(MIN)) )
MAX = 10 ** ( np.ceil (np.log10(MAX)) )
# - apply
axis.set_xlim([MIN,MAX])
# y-axis
if direction is None or direction == 'y':
# - get current limits
MIN,MAX = axis.get_ylim()
# - floor/ceil to full decades
MIN = 10 ** ( np.floor(np.log10(MIN)) )
MAX = 10 ** ( np.ceil (np.log10(MAX)) )
# - apply
axis.set_ylim([MIN,MAX])
# ==================================================================================================
def scale_lim(lim,factor=1.05):
r'''
Scale limits to be 5% wider, to have a nice plot.
:arguments:
**lim** (``<list>`` | ``<str>``)
The limits. May be a string "[...,...]", which is converted to a list.
:options:
**factor** ([``1.05``] | ``<float>``)
Scale factor.
'''
# convert string "[...,...]"
if type(lim) == str: lim = eval(lim)
# scale limits
D = lim[1] - lim[0]
lim[0] -= (factor-1.)/2. * D
lim[1] += (factor-1.)/2. * D
return lim
# ==================================================================================================
def abs2rel_x(x, axis=None):
r'''
Transform absolute x-coordinates to relative x-coordinates. Relative coordinates correspond to a
fraction of the relevant axis. Be sure to set the limits and scale before calling this function!
:arguments:
**x** (``float``, ``list``)
Absolute coordinates.
:options:
**axis** ([``plt.gca()``] | ...)
Specify the axis to which to apply the limits.
:returns:
**x** (``float``, ``list``)
Relative coordinates.
'''
# get current axis
if axis is None:
axis = plt.gca()
# get current limits
xmin, xmax = axis.get_xlim()
# transform
# - log scale
if axis.get_xscale() == 'log':
try : return [(np.log10(i)-np.log10(xmin))/(np.log10(xmax)-np.log10(xmin)) if i is not None else i for i in x]
except: return (np.log10(x)-np.log10(xmin))/(np.log10(xmax)-np.log10(xmin))
# - normal scale
else:
try : return [(i-xmin)/(xmax-xmin) if i is not None else i for i in x]
except: return (x-xmin)/(xmax-xmin)
# ==================================================================================================
def abs2rel_y(y, axis=None):
    r'''
    Transform absolute y-coordinates to relative y-coordinates. Relative coordinates correspond to a
    fraction of the relevant axis. Be sure to set the limits and scale before calling this function!

    :arguments:

        **y** (``float``, ``list``)
            Absolute coordinates.

    :options:

        **axis** ([``plt.gca()``] | ...)
            Specify the axis to which to apply the limits.

    :returns:

        **y** (``float``, ``list``)
            Relative coordinates.
    '''

    # get current axis
    if axis is None:
        axis = plt.gca()

    # get current limits
    ymin, ymax = axis.get_ylim()

    # transform
    # BUG FIX: this function operates on the y-axis, so the y-scale must decide the
    # transformation; the original checked axis.get_xscale() (a copy-paste slip),
    # which gave wrong results on axes with mixed linear/log scales
    # - log scale
    if axis.get_yscale() == 'log':
        # the try/except distinguishes list-like input (iterable) from a scalar
        try   : return [(np.log10(i) - np.log10(ymin)) / (np.log10(ymax) - np.log10(ymin)) if i is not None else i for i in y]
        except: return (np.log10(y) - np.log10(ymin)) / (np.log10(ymax) - np.log10(ymin))
    # - normal scale
    else:
        try   : return [(i - ymin) / (ymax - ymin) if i is not None else i for i in y]
        except: return (y - ymin) / (ymax - ymin)
# ==================================================================================================
def rel2abs_x(x, axis=None):
r'''
Transform relative x-coordinates to absolute x-coordinates. Relative coordinates correspond to a
fraction of the relevant axis. Be sure to set the limits and scale before calling this function!
:arguments:
**x** (``float``, ``list``)
Relative coordinates.
:options:
**axis** ([``plt.gca()``] | ...)
Specify the axis to which to apply the limits.
:returns:
**x** (``float``, ``list``)
Absolute coordinates.
'''
# get current axis
if axis is None:
axis = plt.gca()
# get current limits
xmin, xmax = axis.get_xlim()
# transform
# - log scale
if axis.get_xscale() == 'log':
try : return [10.**(np.log10(xmin)+i*(np.log10(xmax)-np.log10(xmin))) if i is not None else i for i in x]
except: return 10.**(np.log10(xmin)+x*(np.log10(xmax)-np.log10(xmin)))
# - normal scale
else:
try : return [xmin+i*(xmax-xmin) if i is not None else i for i in x]
except: return xmin+x*(xmax-xmin)
# ==================================================================================================
def rel2abs_y(y, axis=None):
    r'''
    Transform relative y-coordinates to absolute y-coordinates. Relative coordinates correspond to a
    fraction of the relevant axis. Be sure to set the limits and scale before calling this function!

    :arguments:

        **y** (``float``, ``list``)
            Relative coordinates.

    :options:

        **axis** ([``plt.gca()``] | ...)
            Specify the axis to which to apply the limits.

    :returns:

        **y** (``float``, ``list``)
            Absolute coordinates.
    '''

    # get current axis
    if axis is None:
        axis = plt.gca()

    # get current limits
    ymin, ymax = axis.get_ylim()

    # transform
    # BUG FIX: this function operates on the y-axis, so the y-scale must decide the
    # transformation; the original checked axis.get_xscale() (a copy-paste slip),
    # which gave wrong results on axes with mixed linear/log scales
    # - log scale
    if axis.get_yscale() == 'log':
        # the try/except distinguishes list-like input (iterable) from a scalar
        try   : return [10. ** (np.log10(ymin) + i * (np.log10(ymax) - np.log10(ymin))) if i is not None else i for i in y]
        except: return 10. ** (np.log10(ymin) + y * (np.log10(ymax) - np.log10(ymin)))
    # - normal scale
    else:
        try   : return [ymin + i * (ymax - ymin) if i is not None else i for i in y]
        except: return ymin + y * (ymax - ymin)
# ==================================================================================================
def subplots(scale_x=None, scale_y=None, scale=None, **kwargs):
r'''
Run ``matplotlib.pyplot.subplots`` with ``figsize`` set to the correct multiple of the default.
:additional options:
**scale, scale_x, scale_y** (``<float>``)
Scale the figure-size (along one of the dimensions).
'''
if 'figsize' in kwargs: return plt.subplots(**kwargs)
width, height = mpl.rcParams['figure.figsize']
if scale is not None:
width *= scale
height *= scale
if scale_x is not None:
width *= scale_x
if scale_y is not None:
height *= scale_y
nrows = kwargs.pop('nrows', 1)
ncols = kwargs.pop('ncols', 1)
width = ncols * width
height = nrows * height
return plt.subplots(nrows=nrows, ncols=ncols, figsize=(width,height), **kwargs)
# ==================================================================================================
def plot(x, y, units='absolute', axis=None, **kwargs):
r'''
Plot.
:arguments:
**x, y** (``list``)
Coordinates.
:options:
**units** ([``'absolute'``] | ``'relative'``)
The type of units in which the coordinates are specified. Relative coordinates correspond to a
fraction of the relevant axis. If you use relative coordinates, be sure to set the limits and
scale before calling this function!
...
Any ``plt.plot(...)`` option.
:returns:
The handle of the ``plt.plot(...)`` command.
'''
# get current axis
if axis is None:
axis = plt.gca()
# transform
if units.lower() == 'relative':
x = rel2abs_x(x, axis)
y = rel2abs_y(y, axis)
# plot
return axis.plot(x, y, **kwargs)
# ==================================================================================================
def text(x, y, text, units='absolute', axis=None, **kwargs):
r'''
Plot a text.
:arguments:
**x, y** (``float``)
Coordinates.
**text** (``str``)
Text to plot.
:options:
**units** ([``'absolute'``] | ``'relative'``)
The type of units in which the coordinates are specified. Relative coordinates correspond to a
fraction of the relevant axis. If you use relative coordinates, be sure to set the limits and
scale before calling this function!
...
Any ``plt.text(...)`` option.
:returns:
The handle of the ``plt.text(...)`` command.
'''
# get current axis
if axis is None:
axis = plt.gca()
# transform
if units.lower() == 'relative':
x = rel2abs_x(x, axis)
y = rel2abs_y(y, axis)
# plot
return axis.text(x, y, text, **kwargs)
# ==================================================================================================
# ==================================================================================================
def annotate_powerlaw(text, exp, startx, starty, width=None, rx=0.5, ry=0.5, **kwargs):
    r'''
    Add a label to the middle of a power-law annotation (see ``goosempl.plot_powerlaw``).
    :arguments:
        **text** (``str``)
            Label text.
        **exp** (``float``)
            The power-law exponent.
        **startx, starty** (``float``)
            Start coordinates.
    :options:
        **width, height, endx, endy** (``float``)
            Definition of the end coordinate (only one of these options is needed).
        **rx, ry** (``float``)
            Shift in x- and y-direction w.r.t. the default coordinates.
        **units** ([``'relative'``] | ``'absolute'``)
            The type of units in which the coordinates are specified. Relative coordinates
            correspond to a fraction of the relevant axis. If you use relative coordinates, be sure
            to set the limits and scale before calling this function!
        **axis** ([``plt.gca()``] | ...)
            Specify the axis to which to apply the limits.
        ...
            Any ``plt.text(...)`` option.
    :returns:
        The handle of the ``plt.text(...)`` command.
    :raises:
        IOError: if the axis is not on a log-log scale.
    '''
    # get options/defaults
    endx   = kwargs.pop('endx'  , None      )
    endy   = kwargs.pop('endy'  , None      )
    height = kwargs.pop('height', None      )
    units  = kwargs.pop('units' , 'relative')
    # BUGFIX: evaluate plt.gca() lazily; "kwargs.pop('axis', plt.gca())" calls
    # plt.gca() on every invocation, creating/grabbing a figure even when an
    # axis is explicitly supplied
    axis   = kwargs.pop('axis'  , None      )
    if axis is None:
        axis = plt.gca()
    # check: a power-law is a straight line only on a log-log scale
    if axis.get_xscale() != 'log' or axis.get_yscale() != 'log':
        raise IOError('This function only works on a log-log scale, where the power-law is a straight line')
    # apply width/height
    if width is not None:
        endx = startx + width
        endy = None
    elif height is not None:
        if   exp >  0: endy = starty + height
        elif exp == 0: endy = starty
        else         : endy = starty - height
        endx = None
    # transform relative -> absolute coordinates
    if units.lower() == 'relative':
        [startx, endx] = rel2abs_x([startx, endx], axis)
        [starty, endy] = rel2abs_y([starty, endy], axis)
    # determine multiplication constant: the power-law is y = const * x ** exp
    const = starty / ( startx**exp )
    # get the missing end-coordinate from the power-law itself
    if endx is not None: endy = const * endx**exp
    else               : endx = ( endy / const )**( 1/exp )
    # position of the label: an (rx, ry)-fraction along the line, in log-space
    x = 10. ** ( np.log10(startx) + rx * ( np.log10(endx) - np.log10(startx) ) )
    y = 10. ** ( np.log10(starty) + ry * ( np.log10(endy) - np.log10(starty) ) )
    # plot
    return axis.text(x, y, text, **kwargs)
# ==================================================================================================
def plot_powerlaw(exp, startx, starty, width=None, **kwargs):
    r'''
    Plot a power-law.
    :arguments:
        **exp** (``float``)
            The power-law exponent.
        **startx, starty** (``float``)
            Start coordinates.
    :options:
        **width, height, endx, endy** (``float``)
            Definition of the end coordinate (only one of these options is needed).
        **units** ([``'relative'``] | ``'absolute'``)
            The type of units in which the coordinates are specified. Relative coordinates
            correspond to a fraction of the relevant axis. If you use relative coordinates, be sure
            to set the limits and scale before calling this function!
        **axis** ([``plt.gca()``] | ...)
            Specify the axis to which to apply the limits.
        ...
            Any ``plt.plot(...)`` option.
    :returns:
        The handle of the ``plt.plot(...)`` command.
    :raises:
        IOError: if the axis is not on a log-log scale.
    '''
    # get options/defaults
    endx   = kwargs.pop('endx'  , None      )
    endy   = kwargs.pop('endy'  , None      )
    height = kwargs.pop('height', None      )
    units  = kwargs.pop('units' , 'relative')
    # BUGFIX: evaluate plt.gca() lazily; "kwargs.pop('axis', plt.gca())" calls
    # plt.gca() on every invocation, creating/grabbing a figure even when an
    # axis is explicitly supplied
    axis   = kwargs.pop('axis'  , None      )
    if axis is None:
        axis = plt.gca()
    # check: a power-law is a straight line only on a log-log scale
    if axis.get_xscale() != 'log' or axis.get_yscale() != 'log':
        raise IOError('This function only works on a log-log scale, where the power-law is a straight line')
    # apply width/height
    if width is not None:
        endx = startx + width
        endy = None
    elif height is not None:
        if   exp >  0: endy = starty + height
        elif exp == 0: endy = starty
        else         : endy = starty - height
        endx = None
    # transform relative -> absolute coordinates
    if units.lower() == 'relative':
        [startx, endx] = rel2abs_x([startx, endx], axis)
        [starty, endy] = rel2abs_y([starty, endy], axis)
    # determine multiplication constant: the power-law is y = const * x ** exp
    const = starty / ( startx**exp )
    # get the missing end-coordinate from the power-law itself
    if endx is not None: endy = const * endx**exp
    else               : endx = ( endy / const )**( 1/exp )
    # plot (on a log-log scale the power-law is the straight line between the two points)
    return axis.plot([startx, endx], [starty, endy], **kwargs)
# ==================================================================================================
def grid_powerlaw(exp, insert=0, skip=0, end=-1, step=0, axis=None, **kwargs):
    r'''
    Draw a power-law grid: a grid that respects a certain power-law exponent. The grid-lines start from
    the positions of the ticks.
    :arguments:
        **exp** (``float``)
            The power-law exponent.
    :options:
        **insert** (``<int>``)
            Insert extra lines in between the default lines set by the tick positions.
        **skip, end, step** (``<int>``)
            Select from the lines based on ``coor = coor[skip:end:step]``.
        **axis** ([``plt.gca()``] | ...)
            Specify the axis to which to apply the limits.
        ...
            Any ``plt.plot(...)`` option.
    :returns:
        The handle of the ``plt.plot(...)`` command.
    :raises:
        IOError: if the axis is not on a log-log scale.
    '''
    # default axis
    if axis is None: axis = plt.gca()
    # default plot settings
    kwargs.setdefault('color'    , 'k' )
    kwargs.setdefault('linestyle', '--')
    kwargs.setdefault('linewidth', 1   )
    # check: a power-law is a straight line only on a log-log scale
    if axis.get_xscale() != 'log' or axis.get_yscale() != 'log':
        raise IOError('This function only works on a log-log scale, where the power-law is a straight line')
    # zero-exponent: draw horizontal lines
    if exp == 0:
        # y-coordinate of the start positions (as fractions of the axis)
        starty = abs2rel_y(axis.get_yticks(), axis=axis)
        # insert extra coordinates (linear interpolation between the tick fractions)
        if insert > 0:
            n = len(starty)
            x  = np.linspace(0,1,n+(n-1)*int(insert))
            xp = np.linspace(0,1,n)
            starty = np.interp(x, xp, starty)
        # skip coordinates
        # NOTE(review): this branch slices with "end" (default -1, so the last line is
        # dropped) while the non-zero-exponent branch below ignores "end" — confirm intended
        starty = starty[int(skip):int(end):int(1+step)]
        # set remaining coordinates: horizontal lines span the full axis width
        endy   = starty
        startx = np.zeros((len(starty)))
        endx   = np.ones ((len(starty)))
    # all other exponents
    else:
        # get the axis' size in real coordinates
        # - get the limits
        xmin, xmax = axis.get_xlim()
        ymin, ymax = axis.get_ylim()
        # - compute the size in both directions (in decades)
        deltax = np.log10(xmax) - np.log10(xmin)
        deltay = np.log10(ymax) - np.log10(ymin)
        # convert the exponent in real coordinates to an exponent in relative coordinates
        b = np.abs(exp) * deltax / deltay
        # x-coordinate of the start positions (as fractions of the axis)
        startx = abs2rel_x(axis.get_xticks(), axis=axis)
        # compute how many labels need to be prepended
        # (lines starting left of the axis may still cross the visible window)
        Dx   = startx[1] - startx[0]
        nneg = int(np.floor(1./(b*Dx))) - 1
        # add extra to be sure
        if insert > 0:
            nneg += 1
        # prepend start positions to the left of the axis
        if nneg > 0:
            startx = np.hstack(( startx[0]+np.cumsum(-Dx * np.ones((nneg)))[::-1], startx ))
        # insert extra coordinates (linear interpolation between the tick fractions)
        if insert > 0:
            n = len(startx)
            x  = np.linspace(0,1,n+(n-1)*int(insert))
            xp = np.linspace(0,1,n)
            startx = np.interp(x, xp, startx)
        # skip coordinates
        if step > 0:
            startx = startx[int(skip)::int(1+step)]
        # x-coordinate of the end of the lines
        # (each line spans the full relative height, hence a relative run of 1/b)
        endx = startx + 1/b
        # y-coordinate of the start and the end of the lines
        # (bottom-to-top for positive exponents, top-to-bottom for negative)
        if exp > 0:
            starty = np.zeros((len(startx)))
            endy   = np.ones ((len(startx)))
        else:
            starty = np.ones ((len(startx)))
            endy   = np.zeros((len(startx)))
    # convert to real coordinates
    startx = rel2abs_x(startx, axis)
    endx   = rel2abs_x(endx  , axis)
    starty = rel2abs_y(starty, axis)
    endy   = rel2abs_y(endy  , axis)
    # plot all lines at once (one column per line)
    lines = axis.plot(np.vstack(( startx, endx )), np.vstack(( starty, endy )), **kwargs)
    # hide the labels of all but the first line, so the grid gets one legend entry at most
    plt.setp(lines[1:], label="_")
    # return handles
    return lines
# ==================================================================================================
def histogram_bin_edges_minwidth(min_width, bins):
    r'''
    Merge bins with right-neighbour until each bin has a minimum width.
    :arguments:
        **bins** (``<array_like>``)
            The bin-edges.
        **min_width** (``<float>``)
            The minimum bin width.
    :returns:
        The merged bin-edges.
    '''
    # escape: nothing requested
    if min_width is None : return bins
    if min_width is False: return bins
    # keep merging until every bin is wide enough
    while True:
        idx = np.where(np.diff(bins) < min_width)[0]
        if len(idx) == 0: return bins
        idx = idx[0]
        # merging the last bin: drop its left edge, keep the outer bound
        if idx+1 == len(bins)-1: bins = np.hstack(( bins[:(idx)  ], bins[-1]       ))
        # otherwise: drop the right edge of bin "idx" -> merge with right neighbour
        else                   : bins = np.hstack(( bins[:(idx+1)], bins[(idx+2):] ))
# ==================================================================================================
def histogram_bin_edges_mincount(data, min_count, bins):
    r'''
    Merge bins with right-neighbour until each bin has a minimum number of data-points.
    :arguments:
        **data** (``<array_like>``)
            Input data. The histogram is computed over the flattened array.
        **bins** (``<array_like>`` | ``<int>``)
            The bin-edges (or the number of bins, automatically converted to equal-sized bins).
        **min_count** (``<int>``)
            The minimum number of data-points per bin.
    :returns:
        The merged bin-edges.
    '''
    # escape: nothing requested
    if min_count is None : return bins
    if min_count is False: return bins
    # check
    if type(min_count) != int: raise IOError('"min_count" must be an integer number')
    # keep merging until every bin is populous enough
    while True:
        P, _ = np.histogram(data, bins=bins, density=False)
        idx  = np.where(P < min_count)[0]
        if len(idx) == 0: return bins
        idx = idx[0]
        # merging the last bin: drop its left edge, keep the outer bound
        if idx+1 == len(P): bins = np.hstack(( bins[:(idx)  ], bins[-1]       ))
        # otherwise: drop the right edge of bin "idx" -> merge with right neighbour
        else              : bins = np.hstack(( bins[:(idx+1)], bins[(idx+2):] ))
# ==================================================================================================
def histogram_bin_edges(data, bins=10, mode='equal', min_count=None, integer=False, remove_empty_edges=True, min_width=None):
    r'''
    Determine bin-edges.
    :arguments:
        **data** (``<array_like>``)
            Input data. The histogram is computed over the flattened array.
    :options:
        **bins** ([``10``] | ``<int>``)
            The number of bins.
        **mode** ([``'equal'`` | ``<str>``)
            Mode with which to compute the bin-edges:
            * ``'equal'``: each bin has equal width.
            * ``'log'``: logarithmic spacing.
            * ``'uniform'``: uniform number of data-points per bin.
        **min_count** (``<int>``)
            The minimum number of data-points per bin.
        **min_width** (``<float>``)
            The minimum width of each bin.
        **integer** ([``False``] | [``True``])
            If ``True``, bins not encompassing an integer are removed
            (e.g. a bin with edges ``[1.1, 1.9]`` is removed, but ``[0.9, 1.1]`` is not removed).
        **remove_empty_edges** ([``True``] | [``False``])
            Remove empty bins at the beginning or the end.
    :returns:
        **bin_edges** (``<array of dtype float>``)
            The edges to pass into histogram.
    '''
    # determine the bin-edges
    if mode == 'equal':
        bin_edges = np.linspace(np.min(data),np.max(data),bins+1)
    elif mode == 'log':
        bin_edges = np.logspace(np.log10(np.min(data)),np.log10(np.max(data)),bins+1)
    elif mode == 'uniform':
        # - check
        if hasattr(bins, "__len__"):
            raise IOError('Only the number of bins can be specified')
        # - use the minimum count to estimate the number of bins
        if min_count is not None and min_count is not False:
            if type(min_count) != int: raise IOError('"min_count" must be an integer number')
            bins = int(np.floor(float(len(data))/float(min_count)))
        # - number of data-points in each bin (equal for each)
        count = int(np.floor(float(len(data))/float(bins))) * np.ones(bins, dtype='int')
        # - increase the number of data-points by one in as many bins as needed,
        #   such that the total fits the total number of data-points
        #   (BUGFIX: use the built-in "int" -- the "np.int" alias was deprecated
        #   in NumPy 1.20 and removed in NumPy 1.24)
        count[np.linspace(0, bins-1, len(data)-np.sum(count)).astype(int)] += 1
        # - split the data
        idx      = np.empty((bins+1), dtype='int')
        idx[0 ]  = 0
        idx[1:]  = np.cumsum(count)
        idx[-1]  = len(data) - 1
        # - determine the bin-edges
        bin_edges = np.unique(np.sort(data)[idx])
    else:
        raise IOError('Unknown option')
    # remove empty starting and ending bin (related to an unfortunate choice of bin-edges)
    if remove_empty_edges:
        N, _ = np.histogram(data, bins=bin_edges, density=False)
        idx  = np.min(np.where(N>0)[0])
        jdx  = np.max(np.where(N>0)[0])
        bin_edges = bin_edges[(idx):(jdx+2)]
    # merge bins with too few data-points (if needed)
    bin_edges = histogram_bin_edges_mincount(data, min_count=min_count, bins=bin_edges)
    # merge bins that have too small of a width
    bin_edges = histogram_bin_edges_minwidth(min_width=min_width, bins=bin_edges)
    # select only bins that encompass an integer (and retain the original bounds)
    if integer:
        idx = np.where(np.diff(np.floor(bin_edges))>=1)[0]
        idx = np.unique(np.hstack((0, idx, len(bin_edges)-1)))
        bin_edges = bin_edges[idx]
    # return
    return bin_edges
# ==================================================================================================
def histogram(data, return_edges=True, **kwargs):
    r'''
    Compute a histogram.
    See `numpy.histogram <https://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html>`_
    :extra options:
        **return_edges** ([``True``] | [``False``])
            Return the bin edges if set to ``True``, return their midpoints otherwise.
    '''
    # delegate the actual binning to NumPy
    counts, bin_edges = np.histogram(data, **kwargs)
    if return_edges:
        return counts, bin_edges
    # convert the edges to the midpoint of each bin
    midpoints = np.diff(bin_edges) / 2. + bin_edges[:-1]
    return counts, midpoints
# ==================================================================================================
def histogram_cumulative(data, **kwargs):
    r'''
    Compute a cumulative histogram.
    See `numpy.histogram <https://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html>`_
    :extra options:
        **return_edges** ([``True``] | [``False``])
            Return the bin edges if set to ``True``, return their midpoints otherwise.
        **normalize** ([``False``] | ``True``)
            Normalize such that the final probability is one. In this case the function returns the
            (binned) cumulative probability density.
    '''
    # local options (everything else is forwarded to numpy)
    return_edges = kwargs.pop('return_edges', True)
    norm         = kwargs.pop('normalize', False)
    counts, edges = np.histogram(data, **kwargs)
    # accumulate the per-bin counts
    cumulative = np.cumsum(counts)
    if norm:
        # scale so that the final value equals one
        cumulative = cumulative / cumulative[-1]
    if not return_edges:
        # convert the edges to the midpoint of each bin
        edges = np.diff(edges) / 2. + edges[:-1]
    return cumulative, edges
# ==================================================================================================
def hist(P, edges, **kwargs):
    r'''
    Plot a histogram: one rectangular patch per bin.
    :arguments:
        **P** (``<array_like>``)
            Count (or density) per bin.
        **edges** (``<array_like>``)
            The bin-edges (one entry longer than ``P``).
    :options:
        **axis** ([``plt.gca()``] | ...)
            Specify the axis to plot in.
        **cindex** (``<numpy.ndarray>``)
            Array with, for each patch, the value that should be indexed to a color.
        **autoscale** ([``True``] | ``False``)
            Automatically update the limits of the plot.
        ...
            Any ``PatchCollection(...)`` option.
    :returns:
        Handle of the patch objects.
    '''
    from matplotlib.collections import PatchCollection
    from matplotlib.patches import Polygon
    # extract local options
    # BUGFIX: evaluate plt.gca() lazily; "kwargs.pop('axis', plt.gca())" calls
    # plt.gca() on every invocation, creating/grabbing a figure even when an
    # axis is explicitly supplied
    axis      = kwargs.pop('axis'     , None)
    cindex    = kwargs.pop('cindex'   , None)
    autoscale = kwargs.pop('autoscale', True)
    if axis is None:
        axis = plt.gca()
    # set defaults
    kwargs.setdefault('edgecolor','k')
    # no color-index -> set transparent
    if cindex is None:
        kwargs.setdefault('facecolor',(0.,0.,0.,0.))
    # convert each bin -> a rectangular Polygon
    poly = []
    for p, xl, xu in zip(P, edges[:-1], edges[1:]):
        coor = np.array([
            [xl, 0.],
            [xu, 0.],
            [xu, p ],
            [xl, p ],
        ])
        poly.append(Polygon(coor))
    # convert patches -> matplotlib-objects
    p = PatchCollection(poly, **kwargs)
    # add colors to patches
    if cindex is not None:
        p.set_array(cindex)
    # add patches to axis
    axis.add_collection(p)
    # rescale the axes manually (automatic limits of Collections are not supported)
    if autoscale:
        # - get limits
        xlim = [ edges[0], edges[-1] ]
        ylim = [ 0       , np.max(P) ]
        # - set limits +/- 10% extra margin
        axis.set_xlim([xlim[0]-.1*(xlim[1]-xlim[0]),xlim[1]+.1*(xlim[1]-xlim[0])])
        axis.set_ylim([ylim[0]-.1*(ylim[1]-ylim[0]),ylim[1]+.1*(ylim[1]-ylim[0])])
    return p
# ==================================================================================================
def cdf(data, mode='continuous', **kwargs):
    '''
    Return the (empirical) cumulative density of the data.
    :arguments:
        **data** (``<numpy.ndarray>``)
            Input data, to plot the distribution for.
    :returns:
        **P** (``<numpy.ndarray>``)
            Cumulative probability.
        **x** (``<numpy.ndarray>``)
            Data points.
    '''
    # each sorted data-point carries an equal probability increment
    probability = np.linspace(0.0, 1.0, len(data))
    ordered = np.sort(data)
    return (probability, ordered)
# ==================================================================================================
def patch(*args, **kwargs):
    '''
    Add patches to plot. The color of the patches is indexed according to a specified color-index.
    :example:
        Plot a finite element mesh: the outline of the undeformed configuration, and the deformed
        configuration for which the elements get a color e.g. based on stress::
            import matplotlib.pyplot as plt
            import goosempl as gplt
            fig,ax = plt.subplots()
            p = gplt.patch(coor=coor+disp,conn=conn,axis=ax,cindex=stress,cmap='YlOrRd',edgecolor=None)
            _ = gplt.patch(coor=coor     ,conn=conn,axis=ax)
            cbar = fig.colorbar(p,axis=ax,aspect=10)
            plt.show()
    :arguments - option 1/2:
        **patches** (``<list>``)
            List with patch objects. Can be replaced by specifying ``coor`` and ``conn``.
    :arguments - option 2/2:
        **coor** (``<numpy.ndarray>`` | ``<list>`` (nested))
            Matrix with on each row the coordinates (positions) of each node.
        **conn** (``<numpy.ndarray>`` | ``<list>`` (nested))
            Matrix with on each row the number numbers (rows in ``coor``) which form an element (patch).
    :options:
        **cindex** (``<numpy.ndarray>``)
            Array with, for each patch, the value that should be indexed to a color.
        **axis** (``<matplotlib>``)
            Specify an axis to include to plot in. By default the current axis is used.
        **autoscale** ([``True``] | ``False``)
            Automatically update the limits of the plot (currently automatic limits of Collections
            are not supported by matplotlib). Only possible when ``coor`` is specified.
    :recommended options:
        **cmap** (``<str>`` | ...)
            Specify a colormap.
        **linewidth** (``<float>``)
            Width of the edges.
        **edgecolor** (``<str>`` | ...)
            Color of the edges.
        **clim** (``(<float>,<float>)``)
            Lower and upper limit of the color-axis.
    :returns:
        **handle** (``<matplotlib>``)
            Handle of the patch objects.
    .. seealso::
        * `matplotlib example
          <http://matplotlib.org/examples/api/patch_collection.html>`_.
    '''
    from matplotlib.collections import PatchCollection
    from matplotlib.patches import Polygon
    # extract local options
    # BUGFIX: evaluate plt.gca() lazily; an eagerly evaluated default would
    # create/grab a figure even when an axis is explicitly supplied
    axis      = kwargs.pop('axis'     , None)
    cindex    = kwargs.pop('cindex'   , None)
    coor      = kwargs.pop('coor'     , None)
    conn      = kwargs.pop('conn'     , None)
    autoscale = kwargs.pop('autoscale', True)
    if axis is None:
        axis = plt.gca()
    # check input: either a list of patches (option 1) or a mesh (option 2)
    # (BUGFIX: the old check rejected the documented option 1 outright, and
    # "tuple(poly, *args)" raised a TypeError whenever patches were supplied)
    if len(args) == 0 and (coor is None or conn is None):
        raise IOError('Specify both "coor" and "conn"')
    # set defaults
    kwargs.setdefault('edgecolor','k')
    # no color-index -> set transparent
    if cindex is None:
        kwargs.setdefault('facecolor',(0.,0.,0.,0.))
    # assemble the list of patches
    patches = []
    # - convert mesh -> one Polygon per element
    if coor is not None and conn is not None:
        for iconn in conn:
            patches.append(Polygon(coor[iconn,:]))
    # - patches supplied positionally (option 1)
    for arg in args:
        patches.extend(arg)
    # convert patches -> matplotlib-objects
    p = PatchCollection(patches, **kwargs)
    # add colors to patches
    if cindex is not None:
        p.set_array(cindex)
    # add patches to axis
    axis.add_collection(p)
    # rescale the axes manually (only possible when the mesh coordinates are known)
    if autoscale and coor is not None:
        # - get limits
        xlim = [ np.min(coor[:,0]) , np.max(coor[:,0]) ]
        ylim = [ np.min(coor[:,1]) , np.max(coor[:,1]) ]
        # - set limits +/- 10% extra margin
        axis.set_xlim([xlim[0]-.1*(xlim[1]-xlim[0]),xlim[1]+.1*(xlim[1]-xlim[0])])
        axis.set_ylim([ylim[0]-.1*(ylim[1]-ylim[0]),ylim[1]+.1*(ylim[1]-ylim[0])])
    return p
# ==================================================================================================
|
tdegeus/GooseMPL | GooseMPL/__init__.py | annotate_powerlaw | python | def annotate_powerlaw(text, exp, startx, starty, width=None, rx=0.5, ry=0.5, **kwargs):
r'''
Added a label to the middle of a power-law annotation (see ``goosempl.plot_powerlaw``).
:arguments:
**exp** (``float``)
The power-law exponent.
**startx, starty** (``float``)
Start coordinates.
:options:
**width, height, endx, endy** (``float``)
Definition of the end coordinate (only on of these options is needed).
**rx, ry** (``float``)
Shift in x- and y-direction w.r.t. the default coordinates.
**units** ([``'relative'``] | ``'absolute'``)
The type of units in which the coordinates are specified. Relative coordinates correspond to a
fraction of the relevant axis. If you use relative coordinates, be sure to set the limits and
scale before calling this function!
**axis** ([``plt.gca()``] | ...)
Specify the axis to which to apply the limits.
...
Any ``plt.text(...)`` option.
:returns:
The handle of the ``plt.text(...)`` command.
'''
# get options/defaults
endx = kwargs.pop('endx' , None )
endy = kwargs.pop('endy' , None )
height = kwargs.pop('height', None )
units = kwargs.pop('units' , 'relative')
axis = kwargs.pop('axis' , plt.gca() )
# check
if axis.get_xscale() != 'log' or axis.get_yscale() != 'log':
raise IOError('This function only works on a log-log scale, where the power-law is a straight line')
# apply width/height
if width is not None:
endx = startx + width
endy = None
elif height is not None:
if exp > 0: endy = starty + height
elif exp == 0: endy = starty
else : endy = starty - height
endx = None
# transform
if units.lower() == 'relative':
[startx, endx] = rel2abs_x([startx, endx], axis)
[starty, endy] = rel2abs_y([starty, endy], axis)
# determine multiplication constant
const = starty / ( startx**exp )
# get end x/y-coordinate
if endx is not None: endy = const * endx**exp
else : endx = ( endy / const )**( 1/exp )
# middle
x = 10. ** ( np.log10(startx) + rx * ( np.log10(endx) - np.log10(startx) ) )
y = 10. ** ( np.log10(starty) + ry * ( np.log10(endy) - np.log10(starty) ) )
# plot
return axis.text(x, y, text, **kwargs) | r'''
Added a label to the middle of a power-law annotation (see ``goosempl.plot_powerlaw``).
:arguments:
**exp** (``float``)
The power-law exponent.
**startx, starty** (``float``)
Start coordinates.
:options:
**width, height, endx, endy** (``float``)
Definition of the end coordinate (only on of these options is needed).
**rx, ry** (``float``)
Shift in x- and y-direction w.r.t. the default coordinates.
**units** ([``'relative'``] | ``'absolute'``)
The type of units in which the coordinates are specified. Relative coordinates correspond to a
fraction of the relevant axis. If you use relative coordinates, be sure to set the limits and
scale before calling this function!
**axis** ([``plt.gca()``] | ...)
Specify the axis to which to apply the limits.
...
Any ``plt.text(...)`` option.
:returns:
The handle of the ``plt.text(...)`` command. | train | https://github.com/tdegeus/GooseMPL/blob/16e1e06cbcf7131ac98c03ca7251ce83734ef905/GooseMPL/__init__.py#L548-L626 | null | '''
This module provides some extensions to matplotlib.
:dependencies:
* numpy
* matplotlib
:copyright:
| Tom de Geus
| tom@geus.me
| http://www.geus.me
'''
# ==================================================================================================
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
import os,re,sys
# ==================================================================================================
def find_latex_font_serif():
    r'''
    Find an available font to mimic LaTeX, and return its name (``None`` if no candidate matches).
    '''
    import os, re
    import matplotlib.font_manager
    def stem(path):
        # font name = file name without extension, truncated before any ' - ' suffix
        return os.path.splitext(os.path.split(path)[-1])[0].split(' - ')[0]
    # candidate patterns, in order of preference
    preferences = [
        r'.*Computer\ Modern\ Roman.*',
        r'.*CMU\ Serif.*',
        r'.*CMU.*',
        r'.*Times.*',
        r'.*DejaVu.*',
        r'.*Serif.*',
    ]
    available = matplotlib.font_manager.findSystemFonts(fontpaths=None, fontext='ttf')
    for pattern in preferences:
        for path in available:
            if re.match(pattern, path):
                return stem(path)
    return None
# --------------------------------------------------------------------------------------------------
def copy_style():
    r'''
    Write all goose-styles to the relevant matplotlib configuration directory.
    '''
    import os
    import matplotlib
    # style definitions
    # -----------------
    styles = {}
    styles['goose.mplstyle'] = '''
figure.figsize : 8,6
font.weight : normal
font.size : 16
axes.labelsize : medium
axes.titlesize : medium
xtick.labelsize : small
ytick.labelsize : small
xtick.top : True
ytick.right : True
axes.facecolor : none
axes.prop_cycle : cycler('color',['k', 'r', 'g', 'b', 'y', 'c', 'm'])
legend.fontsize : medium
legend.fancybox : true
legend.columnspacing : 1.0
legend.handletextpad : 0.2
lines.linewidth : 2
image.cmap : afmhot
image.interpolation : nearest
image.origin : lower
savefig.facecolor : none
figure.autolayout : True
errorbar.capsize : 2
'''
    styles['goose-tick-in.mplstyle'] = '''
xtick.direction : in
ytick.direction : in
'''
    styles['goose-tick-lower.mplstyle'] = '''
xtick.top : False
ytick.right : False
axes.spines.top : False
axes.spines.right : False
'''
    # LaTeX style: use a LaTeX-like serif font when one is installed
    if find_latex_font_serif() is not None:
        styles['goose-latex.mplstyle'] = r'''
font.family : serif
font.serif : {serif:s}
font.weight : bold
font.size : 18
text.usetex : true
text.latex.preamble : \usepackage{{amsmath}},\usepackage{{amsfonts}},\usepackage{{amssymb}},\usepackage{{bm}}
'''.format(serif=find_latex_font_serif())
    else:
        # BUGFIX: this fallback string is NOT passed through ".format(...)", so the
        # double braces used above would be written to the style file literally
        # ("\usepackage{{amsmath}}"); use single braces here
        styles['goose-latex.mplstyle'] = r'''
font.family : serif
font.weight : bold
font.size : 18
text.usetex : true
text.latex.preamble : \usepackage{amsmath},\usepackage{amsfonts},\usepackage{amssymb},\usepackage{bm}
'''
    # write style definitions
    # -----------------------
    # directory name where the styles are stored
    dirname = os.path.abspath(os.path.join(matplotlib.get_configdir(), 'stylelib'))
    # make directory if it does not yet exist
    if not os.path.isdir(dirname): os.makedirs(dirname)
    # write all styles (use a context manager so the file handles are closed)
    for fname, style in styles.items():
        with open(os.path.join(dirname, fname), 'w') as f:
            f.write(style)
# ==================================================================================================
def set_decade_lims(axis=None, direction=None):
    r'''
    Set limits to the floor/ceil values in terms of decades.
    :options:
        **axis** ([``plt.gca()``] | ...)
            Specify the axis to which to apply the limits.
        **direction** ([``None``] | ``'x'`` | ``'y'``)
            Limit the application to a certain direction (default: both).
    '''
    # fall back on the current axis when none is given
    ax = plt.gca() if axis is None else axis
    def rounded(lo, hi):
        # floor/ceil the limits to whole powers of ten
        return [10 ** (np.floor(np.log10(lo))), 10 ** (np.ceil(np.log10(hi)))]
    # x-axis
    if direction in (None, 'x'):
        ax.set_xlim(rounded(*ax.get_xlim()))
    # y-axis
    if direction in (None, 'y'):
        ax.set_ylim(rounded(*ax.get_ylim()))
# ==================================================================================================
def scale_lim(lim, factor=1.05):
    r'''
    Scale limits to be 5% wider, to have a nice plot.
    :arguments:
        **lim** (``<list>`` | ``<str>``)
            The limits. May be a string "[...,...]", which is converted to a list.
    :options:
        **factor** ([``1.05``] | ``<float>``)
            Scale factor.
    :returns:
        The widened limits (a list).
    '''
    # convert string "[...,...]"
    # SECURITY BUGFIX: use ast.literal_eval instead of eval, so that only a
    # Python literal is accepted and no arbitrary code can be executed
    if type(lim) == str:
        import ast
        lim = list(ast.literal_eval(lim))
    # scale limits: widen symmetrically around the centre
    D = lim[1] - lim[0]
    lim[0] -= (factor-1.)/2. * D
    lim[1] += (factor-1.)/2. * D
    return lim
# ==================================================================================================
def abs2rel_x(x, axis=None):
    r'''
    Transform absolute x-coordinates to relative x-coordinates. Relative coordinates correspond to a
    fraction of the relevant axis. Be sure to set the limits and scale before calling this function!
    :arguments:
        **x** (``float``, ``list``)
            Absolute coordinates.
    :options:
        **axis** ([``plt.gca()``] | ...)
            Specify the axis to which to apply the limits.
    :returns:
        **x** (``float``, ``list``)
            Relative coordinates.
    '''
    # fall back on the current axis when none is given
    ax = plt.gca() if axis is None else axis
    lo, hi = ax.get_xlim()
    # pick the conversion matching the axis' scale
    if ax.get_xscale() == 'log':
        convert = lambda v: (np.log10(v) - np.log10(lo)) / (np.log10(hi) - np.log10(lo))
    else:
        convert = lambda v: (v - lo) / (hi - lo)
    # lists: convert item-by-item, passing "None" through untouched
    try:
        return [convert(v) if v is not None else v for v in x]
    # scalars: "x" is not iterable
    except:
        return convert(x)
# ==================================================================================================
def abs2rel_y(y, axis=None):
    r'''
    Transform absolute y-coordinates to relative y-coordinates. Relative coordinates correspond to a
    fraction of the relevant axis. Be sure to set the limits and scale before calling this function!
    :arguments:
        **y** (``float``, ``list``)
            Absolute coordinates.
    :options:
        **axis** ([``plt.gca()``] | ...)
            Specify the axis to which to apply the limits.
    :returns:
        **y** (``float``, ``list``)
            Relative coordinates.
    '''
    # get current axis
    if axis is None:
        axis = plt.gca()
    # get current limits
    ymin, ymax = axis.get_ylim()
    # transform
    # - log scale
    #   (BUGFIX: inspect the y-scale; this used to check "get_xscale()", which gives
    #   wrong results whenever the x- and y-scales differ)
    if axis.get_yscale() == 'log':
        try   : return [(np.log10(i)-np.log10(ymin))/(np.log10(ymax)-np.log10(ymin)) if i is not None else i for i in y]
        except: return (np.log10(y)-np.log10(ymin))/(np.log10(ymax)-np.log10(ymin))
    # - normal scale
    else:
        try   : return [(i-ymin)/(ymax-ymin) if i is not None else i for i in y]
        except: return (y-ymin)/(ymax-ymin)
# ==================================================================================================
def rel2abs_x(x, axis=None):
    r'''
    Transform relative x-coordinates to absolute x-coordinates. Relative coordinates correspond to a
    fraction of the relevant axis. Be sure to set the limits and scale before calling this function!
    :arguments:
        **x** (``float``, ``list``)
            Relative coordinates.
    :options:
        **axis** ([``plt.gca()``] | ...)
            Specify the axis to which to apply the limits.
    :returns:
        **x** (``float``, ``list``)
            Absolute coordinates.
    '''
    # fall back on the current axis when none is given
    ax = plt.gca() if axis is None else axis
    lo, hi = ax.get_xlim()
    # pick the conversion matching the axis' scale
    if ax.get_xscale() == 'log':
        convert = lambda v: 10. ** (np.log10(lo) + v * (np.log10(hi) - np.log10(lo)))
    else:
        convert = lambda v: lo + v * (hi - lo)
    # lists: convert item-by-item, passing "None" through untouched
    try:
        return [convert(v) if v is not None else v for v in x]
    # scalars: "x" is not iterable
    except:
        return convert(x)
# ==================================================================================================
def rel2abs_y(y, axis=None):
    r'''
    Transform relative y-coordinates to absolute y-coordinates. Relative coordinates correspond to a
    fraction of the relevant axis. Be sure to set the limits and scale before calling this function!
    :arguments:
        **y** (``float``, ``list``)
            Relative coordinates.
    :options:
        **axis** ([``plt.gca()``] | ...)
            Specify the axis to which to apply the limits.
    :returns:
        **y** (``float``, ``list``)
            Absolute coordinates.
    '''
    # get current axis
    if axis is None:
        axis = plt.gca()
    # get current limits
    ymin, ymax = axis.get_ylim()
    # transform
    # - log scale
    #   (BUGFIX: inspect the y-scale; this used to check "get_xscale()", which gives
    #   wrong results whenever the x- and y-scales differ)
    if axis.get_yscale() == 'log':
        try   : return [10.**(np.log10(ymin)+i*(np.log10(ymax)-np.log10(ymin))) if i is not None else i for i in y]
        except: return 10.**(np.log10(ymin)+y*(np.log10(ymax)-np.log10(ymin)))
    # - normal scale
    else:
        try   : return [ymin+i*(ymax-ymin) if i is not None else i for i in y]
        except: return ymin+y*(ymax-ymin)
# ==================================================================================================
def subplots(scale_x=None, scale_y=None, scale=None, **kwargs):
    r'''
    Run ``matplotlib.pyplot.subplots`` with ``figsize`` set to the correct multiple of the default.
    :additional options:
        **scale, scale_x, scale_y** (``<float>``)
            Scale the figure-size (along one of the dimensions).
    '''
    # an explicit figure-size wins: forward everything untouched
    if 'figsize' in kwargs:
        return plt.subplots(**kwargs)
    # start from the default figure-size and apply the scale factors
    width, height = mpl.rcParams['figure.figsize']
    if scale is not None:
        width, height = width * scale, height * scale
    if scale_x is not None:
        width = width * scale_x
    if scale_y is not None:
        height = height * scale_y
    # one (scaled) default-sized panel per row and per column
    nrows = kwargs.pop('nrows', 1)
    ncols = kwargs.pop('ncols', 1)
    return plt.subplots(nrows=nrows, ncols=ncols, figsize=(ncols * width, nrows * height), **kwargs)
# ==================================================================================================
def plot(x, y, units='absolute', axis=None, **kwargs):
    r'''
    Plot ``y`` against ``x``.
    :arguments:
        **x, y** (``list``)
            Coordinates.
    :options:
        **units** ([``'absolute'``] | ``'relative'``)
            Interpretation of the coordinates: relative coordinates are fractions of the relevant
            axis. When using relative coordinates, set the limits and scale *before* calling this
            function!
        ...
            Any ``plt.plot(...)`` option.
    :returns:
        The handle of the ``plt.plot(...)`` command.
    '''
    # fall back on the current axis when none is given
    ax = plt.gca() if axis is None else axis
    # relative units: convert axis-fractions to data coordinates
    if units.lower() == 'relative':
        x = rel2abs_x(x, ax)
        y = rel2abs_y(y, ax)
    return ax.plot(x, y, **kwargs)
# ==================================================================================================
def text(x, y, text, units='absolute', axis=None, **kwargs):
    r'''
    Plot a text.
    :arguments:
        **x, y** (``float``)
            Coordinates.
        **text** (``str``)
            Text to plot.
    :options:
        **units** ([``'absolute'``] | ``'relative'``)
            Interpretation of the coordinates: relative coordinates are fractions of the relevant
            axis. When using relative coordinates, set the limits and scale *before* calling this
            function!
        ...
            Any ``plt.text(...)`` option.
    :returns:
        The handle of the ``plt.text(...)`` command.
    '''
    # fall back on the current axis when none is given
    ax = plt.gca() if axis is None else axis
    # relative units: convert axis-fractions to data coordinates
    if units.lower() == 'relative':
        x = rel2abs_x(x, ax)
        y = rel2abs_y(y, ax)
    return ax.text(x, y, text, **kwargs)
# ==================================================================================================
def diagonal_powerlaw(exp, ll=None, lr=None, tl=None, tr=None, width=None, height=None, plot=False, **kwargs):
    r'''
    Set the limits such that a power-law with a certain exponent lies on the diagonal.

    :arguments:

      **exp** (``<float>``)
        The power-law exponent.

      **ll, lr, tl, tr** (``<list>``)
        Coordinates of the lower-left, or the lower-right, or the top-left, or the top-right corner.
        Specify exactly one of them.

      **width, height** (``<float>``)
        Width or the height. Specify exactly one of them.

    :options:

      **axis** ([``plt.gca()``] | ...)
        Specify the axis to which to apply the limits.

      **plot** ([``False``] | ``True``)
        Plot the diagonal.

      ...
        Any ``plt.plot(...)`` option.

    :returns:

      The handle of the ``plt.plot(...)`` command (if any).
    '''
    # resolve the default axis lazily: "plt.gca()" creates a figure as a side-effect,
    # which should not happen when the caller supplies "axis"
    axis = kwargs.pop('axis', None)
    if axis is None:
        axis = plt.gca()
    # convert the size to log-space (exactly one of "width"/"height" must be given)
    if width and not height: width = np.log(width )
    elif height and not width : height = np.log(height)
    else: raise IOError('Specify "width" or "height"')
    # convert the anchor corner to log-space (exactly one corner must be given)
    if ll and not lr and not tl and not tr: ll = [ np.log(ll[0]), np.log(ll[1]) ]
    elif lr and not ll and not tl and not tr: lr = [ np.log(lr[0]), np.log(lr[1]) ]
    elif tl and not lr and not ll and not tr: tl = [ np.log(tl[0]), np.log(tl[1]) ]
    elif tr and not lr and not tl and not ll: tr = [ np.log(tr[0]), np.log(tr[1]) ]
    else: raise IOError('Specify "ll" or "lr" or "tl" or "tr"')
    # a power-law is only a straight line on a log-log scale
    axis.set_xscale('log')
    axis.set_yscale('log')
    # derive the missing dimension from the exponent (the slope of the diagonal);
    # compare against None: a width of exactly 1 maps to log(1) == 0, which is falsy
    # and previously left "height" unset (TypeError further down)
    if width is not None: height = width * np.abs(exp)
    else                : width  = height / np.abs(exp)
    # set the limits, growing away from the anchored corner
    if ll:
        axis.set_xlim(sorted([np.exp(ll[0]), np.exp(ll[0]+width )]))
        axis.set_ylim(sorted([np.exp(ll[1]), np.exp(ll[1]+height)]))
    elif lr:
        axis.set_xlim(sorted([np.exp(lr[0]), np.exp(lr[0]-width )]))
        axis.set_ylim(sorted([np.exp(lr[1]), np.exp(lr[1]+height)]))
    elif tl:
        axis.set_xlim(sorted([np.exp(tl[0]), np.exp(tl[0]+width )]))
        axis.set_ylim(sorted([np.exp(tl[1]), np.exp(tl[1]-height)]))
    elif tr:
        axis.set_xlim(sorted([np.exp(tr[0]), np.exp(tr[0]-width )]))
        axis.set_ylim(sorted([np.exp(tr[1]), np.exp(tr[1]-height)]))
    # optionally draw the diagonal itself (in relative coordinates spanning the whole axis)
    if plot:
        if exp > 0: return plot_powerlaw(exp, 0., 0., 1., **kwargs)
        else      : return plot_powerlaw(exp, 0., 1., 1., **kwargs)
# ==================================================================================================
# ==================================================================================================
def plot_powerlaw(exp, startx, starty, width=None, **kwargs):
    r'''
    Plot a power-law ``y = c * x ** exp`` through a given start point.

    :arguments:

      **exp** (``float``)
        The power-law exponent.

      **startx, starty** (``float``)
        Start coordinates.

    :options:

      **width, height, endx, endy** (``float``)
        Definition of the end coordinate (only one of these options is needed).

      **units** ([``'relative'``] | ``'absolute'``)
        The type of units in which the coordinates are specified. Relative coordinates correspond to
        a fraction of the relevant axis. If you use relative coordinates, be sure to set the limits
        and scale before calling this function!

      **axis** ([``plt.gca()``] | ...)
        Specify the axis to which to apply the limits.

      ...
        Any ``plt.plot(...)`` option.

    :returns:

      The handle of the ``plt.plot(...)`` command.
    '''
    # get options/defaults
    endx   = kwargs.pop('endx'  , None      )
    endy   = kwargs.pop('endy'  , None      )
    height = kwargs.pop('height', None      )
    units  = kwargs.pop('units' , 'relative')
    # resolve the default axis lazily: "plt.gca()" creates a figure as a side-effect,
    # which should not happen when the caller supplies "axis"
    axis   = kwargs.pop('axis'  , None      )
    if axis is None:
        axis = plt.gca()
    # check: a power-law is only a straight line on a log-log scale
    if axis.get_xscale() != 'log' or axis.get_yscale() != 'log':
        raise IOError('This function only works on a log-log scale, where the power-law is a straight line')
    # apply width/height: these take precedence over "endx"/"endy"
    if width is not None:
        endx = startx + width
        endy = None
    elif height is not None:
        if   exp >  0: endy = starty + height
        elif exp == 0: endy = starty
        else         : endy = starty - height
        endx = None
    # transform relative coordinates (axis fractions) to data coordinates
    if units.lower() == 'relative':
        [startx, endx] = rel2abs_x([startx, endx], axis)
        [starty, endy] = rel2abs_y([starty, endy], axis)
    # determine multiplication constant: the power-law passes through the start point
    const = starty / ( startx**exp )
    # get end x/y-coordinate (whichever one is still missing)
    if endx is not None: endy = const * endx**exp
    else               : endx = ( endy / const )**( 1/exp )
    # plot
    return axis.plot([startx, endx], [starty, endy], **kwargs)
# ==================================================================================================
def grid_powerlaw(exp, insert=0, skip=0, end=-1, step=0, axis=None, **kwargs):
    r'''
    Draw a power-law grid: a grid that respects a certain power-law exponent. The grid-lines start
    from the positions of the ticks.

    :arguments:

      **exp** (``float``)
        The power-law exponent.

    :options:

      **insert** (``<int>``)
        Insert extra lines in between the default lines set by the tick positions.

      **skip, end, step** (``<int>``)
        Select from the lines based on ``coor = coor[skip:end:step]``.
        NOTE(review): ``end`` is only honoured when ``exp == 0``; for other exponents the selection
        is ``coor[skip::1+step]`` and only applied when ``step > 0`` -- confirm this is intended.

      **axis** ([``plt.gca()``] | ...)
        Specify the axis to which to apply the limits.

      ...
        Any ``plt.plot(...)`` option.

    :returns:

      The handles of the ``plt.plot(...)`` command (one per grid-line).
    '''
    # default axis
    if axis is None: axis = plt.gca()
    # default plot settings: thin, black, dashed grid-lines
    kwargs.setdefault('color'    , 'k' )
    kwargs.setdefault('linestyle', '--')
    kwargs.setdefault('linewidth', 1   )
    # check: a power-law is only a straight line on a log-log scale
    if axis.get_xscale() != 'log' or axis.get_yscale() != 'log':
        raise IOError('This function only works on a log-log scale, where the power-law is a straight line')
    # zero-exponent: draw horizontal lines
    if exp == 0:
        # y-coordinate of the start positions (as a fraction of the axis height)
        starty = abs2rel_y(axis.get_yticks(), axis=axis)
        # insert extra coordinates by linear interpolation between the tick positions
        if insert > 0:
            n      = len(starty)
            x      = np.linspace(0,1,n+(n-1)*int(insert))
            xp     = np.linspace(0,1,n)
            starty = np.interp(x, xp, starty)
        # skip coordinates
        starty = starty[int(skip):int(end):int(1+step)]
        # set remaining coordinates: horizontal lines spanning the full axis width
        endy   = starty
        startx = np.zeros((len(starty)))
        endx   = np.ones ((len(starty)))
    # all other exponents
    else:
        # get the axis' size in real coordinates
        # - get the limits
        xmin, xmax = axis.get_xlim()
        ymin, ymax = axis.get_ylim()
        # - compute the size in both directions (in decades)
        deltax = np.log10(xmax) - np.log10(xmin)
        deltay = np.log10(ymax) - np.log10(ymin)
        # convert the exponent in real coordinates to an exponent in relative coordinates
        b = np.abs(exp) * deltax / deltay
        # x-coordinate of the start positions (as a fraction of the axis width)
        startx = abs2rel_x(axis.get_xticks(), axis=axis)
        # compute how many labels need to be prepended: lines that start left of the axis
        # may still cross it and must also be drawn
        Dx   = startx[1] - startx[0]
        nneg = int(np.floor(1./(b*Dx))) - 1
        # add extra to be sure
        if insert > 0:
            nneg += 1
        # prepend
        if nneg > 0:
            startx = np.hstack(( startx[0]+np.cumsum(-Dx * np.ones((nneg)))[::-1], startx ))
        # insert extra coordinates by linear interpolation between the tick positions
        if insert > 0:
            n      = len(startx)
            x      = np.linspace(0,1,n+(n-1)*int(insert))
            xp     = np.linspace(0,1,n)
            startx = np.interp(x, xp, startx)
        # skip coordinates
        if step > 0:
            startx = startx[int(skip)::int(1+step)]
        # x-coordinate of the end of the lines: each line rises one full axis height,
        # i.e. a run of 1/b in relative width
        endx = startx + 1/b
        # y-coordinate of the start and the end of the lines (direction set by the sign of "exp")
        if exp > 0:
            starty = np.zeros((len(startx)))
            endy   = np.ones ((len(startx)))
        else:
            starty = np.ones ((len(startx)))
            endy   = np.zeros((len(startx)))
    # convert to real coordinates
    startx = rel2abs_x(startx, axis)
    endx   = rel2abs_x(endx  , axis)
    starty = rel2abs_y(starty, axis)
    endy   = rel2abs_y(endy  , axis)
    # plot all grid-lines with a single call (one column per line)
    lines = axis.plot(np.vstack(( startx, endx )), np.vstack(( starty, endy )), **kwargs)
    # suppress excess legend labels: only the first line keeps its label
    plt.setp(lines[1:], label="_")
    # return handles
    return lines
# ==================================================================================================
def histogram_bin_edges_minwidth(min_width, bins):
    r'''
    Merge too-narrow bins with their right-neighbour until every bin is at least
    ``min_width`` wide. The last bin, if too narrow, is merged into its left-neighbour.

    :arguments:

      **bins** (``<array_like>``)
        The bin-edges.

      **min_width** (``<float>``)
        The minimum bin width.
    '''
    # escape: no constraint given
    if min_width is None or min_width is False:
        return bins
    # merge one narrow bin per iteration, until all bins are wide enough
    while True:
        narrow = np.where(np.diff(bins) < min_width)[0]
        if narrow.size == 0:
            return bins
        first = narrow[0]
        if first + 1 == len(bins) - 1:
            # the narrow bin is the last one: merge it into its left-neighbour
            bins = np.hstack(( bins[:first], bins[-1] ))
        else:
            # drop the shared edge so the bin merges with its right-neighbour
            bins = np.hstack(( bins[:first + 1], bins[first + 2:] ))
# ==================================================================================================
def histogram_bin_edges_mincount(data, min_count, bins):
    r'''
    Merge sparse bins with their right-neighbour until every bin holds at least
    ``min_count`` data-points. The last bin, if sparse, is merged into its left-neighbour.

    :arguments:

      **data** (``<array_like>``)
        Input data. The histogram is computed over the flattened array.

      **bins** (``<array_like>`` | ``<int>``)
        The bin-edges (or the number of bins, automatically converted to equal-sized bins).

      **min_count** (``<int>``)
        The minimum number of data-points per bin.
    '''
    # escape: no constraint given
    if min_count is None or min_count is False:
        return bins
    # check
    if type(min_count) is not int:
        raise IOError('"min_count" must be an integer number')
    # merge one sparse bin per iteration, until all bins are full enough
    while True:
        count, _ = np.histogram(data, bins=bins, density=False)
        sparse = np.where(count < min_count)[0]
        if sparse.size == 0:
            return bins
        first = sparse[0]
        if first + 1 == len(count):
            # the sparse bin is the last one: merge it into its left-neighbour
            bins = np.hstack(( bins[:first], bins[-1] ))
        else:
            # drop the shared edge so the bin merges with its right-neighbour
            bins = np.hstack(( bins[:first + 1], bins[first + 2:] ))
# ==================================================================================================
def histogram_bin_edges(data, bins=10, mode='equal', min_count=None, integer=False, remove_empty_edges=True, min_width=None):
    r'''
    Determine bin-edges.

    :arguments:

      **data** (``<array_like>``)
        Input data. The histogram is computed over the flattened array.

    :options:

      **bins** ([``10``] | ``<int>``)
        The number of bins.

      **mode** ([``'equal'``] | ``<str>``)
        Mode with which to compute the bin-edges:
        * ``'equal'``: each bin has equal width.
        * ``'log'``: logarithmic spacing.
        * ``'uniform'``: uniform number of data-points per bin.

      **min_count** (``<int>``)
        The minimum number of data-points per bin.

      **min_width** (``<float>``)
        The minimum width of each bin.

      **integer** ([``False``] | ``True``)
        If ``True``, bins not encompassing an integer are removed
        (e.g. a bin with edges ``[1.1, 1.9]`` is removed, but ``[0.9, 1.1]`` is not removed).

      **remove_empty_edges** ([``True``] | ``False``)
        Remove empty bins at the beginning or the end.

    :returns:

      **bin_edges** (``<array of dtype float>``)
        The edges to pass into histogram.
    '''
    # determine the bin-edges
    if mode == 'equal':
        bin_edges = np.linspace(np.min(data),np.max(data),bins+1)
    elif mode == 'log':
        bin_edges = np.logspace(np.log10(np.min(data)),np.log10(np.max(data)),bins+1)
    elif mode == 'uniform':
        # - check
        if hasattr(bins, "__len__"):
            raise IOError('Only the number of bins can be specified')
        # - use the minimum count to estimate the number of bins
        if min_count is not None and min_count is not False:
            if type(min_count) != int: raise IOError('"min_count" must be an integer number')
            bins = int(np.floor(float(len(data))/float(min_count)))
        # - number of data-points in each bin (equal for each)
        count = int(np.floor(float(len(data))/float(bins))) * np.ones(bins, dtype='int')
        # - increase the number of data-points by one in as many bins as needed,
        #   such that the total fits the total number of data-points
        #   (BUGFIX: "np.int" was removed in NumPy >= 1.24, use the builtin "int" instead)
        count[np.linspace(0, bins-1, len(data)-np.sum(count)).astype(int)] += 1
        # - split the data
        idx = np.empty((bins+1), dtype='int')
        idx[0 ] = 0
        idx[1:] = np.cumsum(count)
        idx[-1] = len(data) - 1
        # - determine the bin-edges: the data-values at the split points
        bin_edges = np.unique(np.sort(data)[idx])
    else:
        raise IOError('Unknown option')
    # remove empty starting and ending bin (related to an unfortunate choice of bin-edges)
    if remove_empty_edges:
        N, _ = np.histogram(data, bins=bin_edges, density=False)
        idx = np.min(np.where(N>0)[0])
        jdx = np.max(np.where(N>0)[0])
        bin_edges = bin_edges[(idx):(jdx+2)]
    # merge bins with too few data-points (if needed)
    bin_edges = histogram_bin_edges_mincount(data, min_count=min_count, bins=bin_edges)
    # merge bins that have too small of a width
    bin_edges = histogram_bin_edges_minwidth(min_width=min_width, bins=bin_edges)
    # select only bins that encompass an integer (and retain the original bounds)
    if integer:
        idx = np.where(np.diff(np.floor(bin_edges))>=1)[0]
        idx = np.unique(np.hstack((0, idx, len(bin_edges)-1)))
        bin_edges = bin_edges[idx]
    # return
    return bin_edges
# ==================================================================================================
def histogram(data, return_edges=True, **kwargs):
    r'''
    Compute a histogram: a thin wrapper around ``numpy.histogram``.

    See `numpy.histogram <https://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html>`_

    :extra options:

      **return_edges** ([``True``] | [``False``])
        Return the bin edges if set to ``True``, return their midpoints otherwise.
    '''
    # delegate the counting to NumPy
    counts, edges = np.histogram(data, **kwargs)
    # default output: counts and bin-edges
    if return_edges:
        return counts, edges
    # otherwise: convert bin-edges to the midpoints of each bin
    midpoints = np.diff(edges) / 2. + edges[:-1]
    return counts, midpoints
# ==================================================================================================
def histogram_cumulative(data, **kwargs):
    r'''
    Compute a cumulative histogram.

    See `numpy.histogram <https://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html>`_

    :extra options:

      **return_edges** ([``True``] | [``False``])
        Return the bin edges if set to ``True``, return their midpoints otherwise.

      **normalize** ([``False``] | ``True``)
        Normalize such that the final probability is one. In this case the function returns the
        (binned) cumulative probability density.
    '''
    # extract the wrapper-specific options before forwarding to NumPy
    return_edges = kwargs.pop('return_edges', True)
    normalize    = kwargs.pop('normalize', False)
    # bin the data, then accumulate the counts
    counts, edges = np.histogram(data, **kwargs)
    cumulative = np.cumsum(counts)
    # scale so the final value equals one
    if normalize:
        cumulative = cumulative / cumulative[-1]
    # optionally convert bin-edges to the midpoints of each bin
    if not return_edges:
        edges = np.diff(edges) / 2. + edges[:-1]
    return cumulative, edges
# ==================================================================================================
def hist(P, edges, **kwargs):
    r'''
    Plot a histogram as a collection of rectangular patches (one per bin).

    :arguments:

      **P** (``<array_like>``)
        Count (or density) per bin.

      **edges** (``<array_like>``)
        The bin-edges (one entry longer than ``P``).

    :options:

      **axis** ([``plt.gca()``] | ...)
        Specify the axis to plot in.

      **cindex** (``<numpy.ndarray>``)
        Array with, for each patch, the value that should be indexed to a color.

      **autoscale** ([``True``] | ``False``)
        Automatically update the limits of the plot.

      ...
        Any ``PatchCollection(...)`` option.

    :returns:

      The handle of the patch objects.
    '''
    from matplotlib.collections import PatchCollection
    from matplotlib.patches import Polygon
    # extract local options
    # NOTE: resolve the default axis lazily, "plt.gca()" creates a figure as a side-effect
    # and should not be called when the caller supplies "axis"
    axis      = kwargs.pop( 'axis'      , None )
    cindex    = kwargs.pop( 'cindex'    , None )
    autoscale = kwargs.pop( 'autoscale' , True )
    if axis is None:
        axis = plt.gca()
    # set defaults
    kwargs.setdefault('edgecolor','k')
    # no color-index -> set transparent
    if cindex is None:
        kwargs.setdefault('facecolor',(0.,0.,0.,0.))
    # convert each bin -> a rectangular Polygon (counter-clockwise from the lower-left corner)
    poly = []
    for p, xl, xu in zip(P, edges[:-1], edges[1:]):
        coor = np.array([
            [xl, 0.],
            [xu, 0.],
            [xu, p ],
            [xl, p ],
        ])
        poly.append(Polygon(coor))
    # convert patches -> matplotlib-objects
    # (the old "args = (poly)" was not a tuple, just the list with redundant parentheses)
    p = PatchCollection(poly, **kwargs)
    # add colors to patches
    if cindex is not None:
        p.set_array(cindex)
    # add patches to axis
    axis.add_collection(p)
    # rescale the axes manually (automatic limits of Collections are not supported by matplotlib)
    if autoscale:
        # - get limits
        xlim = [ edges[0], edges[-1] ]
        ylim = [ 0       , np.max(P) ]
        # - set limits +/- 10% extra margin
        axis.set_xlim([xlim[0]-.1*(xlim[1]-xlim[0]),xlim[1]+.1*(xlim[1]-xlim[0])])
        axis.set_ylim([ylim[0]-.1*(ylim[1]-ylim[0]),ylim[1]+.1*(ylim[1]-ylim[0])])
    return p
# ==================================================================================================
def cdf(data, mode='continuous', **kwargs):
    '''
    Return the empirical cumulative density of ``data``.

    :arguments:

      **data** (``<numpy.ndarray>``)
        Input data, to plot the distribution for.

    :returns:

      **P** (``<numpy.ndarray>``)
        Cumulative probability.

      **x** (``<numpy.ndarray>``)
        Data points (sorted ascending).
    '''
    # NOTE: "mode" and any extra options are currently unused (kept for interface compatibility)
    probabilities = np.linspace(0.0, 1.0, len(data))
    return (probabilities, np.sort(data))
# ==================================================================================================
def patch(*args,**kwargs):
    '''
    Add patches to plot. The color of the patches is indexed according to a specified color-index.

    :example:

      Plot a finite element mesh: the outline of the undeformed configuration, and the deformed
      configuration for which the elements get a color e.g. based on stress::

        import matplotlib.pyplot as plt
        import goosempl as gplt

        fig,ax = plt.subplots()

        p = gplt.patch(coor=coor+disp,conn=conn,axis=ax,cindex=stress,cmap='YlOrRd',edgecolor=None)
        _ = gplt.patch(coor=coor     ,conn=conn,axis=ax)

        cbar = fig.colorbar(p,axis=ax,aspect=10)

        plt.show()

    :arguments - option 1/2:

      **patches** (``<list>``)
        List with patch objects. Can be replaced by specifying ``coor`` and ``conn``.

    :arguments - option 2/2:

      **coor** (``<numpy.ndarray>`` | ``<list>`` (nested))
        Matrix with on each row the coordinates (positions) of each node.

      **conn** (``<numpy.ndarray>`` | ``<list>`` (nested))
        Matrix with on each row the number numbers (rows in ``coor``) which form an element (patch).

    :options:

      **cindex** (``<numpy.ndarray>``)
        Array with, for each patch, the value that should be indexed to a color.

      **axis** (``<matplotlib>``)
        Specify an axis to include to plot in. By default the current axis is used.

      **autoscale** ([``True``] | ``False``)
        Automatically update the limits of the plot (currently automatic limits of Collections are
        not supported by matplotlib).

    :recommended options:

      **cmap** (``<str>`` | ...)
        Specify a colormap.

      **linewidth** (``<float>``)
        Width of the edges.

      **edgecolor** (``<str>`` | ...)
        Color of the edges.

      **clim** (``(<float>,<float>)``)
        Lower and upper limit of the color-axis.

    :returns:

      **handle** (``<matplotlib>``)
        Handle of the patch objects.

    .. seealso::

      * `matplotlib example
        <http://matplotlib.org/examples/api/patch_collection.html>`_.
    '''
    from matplotlib.collections import PatchCollection
    from matplotlib.patches import Polygon
    # check dependent options
    # NOTE(review): "coor"/"conn" are mandatory here, so the documented option of passing patch
    # objects positionally is only usable *in addition* to a mesh -- confirm intended behavior
    if ( 'coor' not in kwargs or 'conn' not in kwargs ):
        raise IOError('Specify both "coor" and "conn"')
    # extract local options
    # NOTE: resolve the default axis lazily, "plt.gca()" creates a figure as a side-effect
    # and should not be called when the caller supplies "axis"
    axis      = kwargs.pop( 'axis'      , None )
    cindex    = kwargs.pop( 'cindex'    , None )
    coor      = kwargs.pop( 'coor'      , None )
    conn      = kwargs.pop( 'conn'      , None )
    autoscale = kwargs.pop( 'autoscale' , True )
    if axis is None:
        axis = plt.gca()
    # set defaults
    kwargs.setdefault('edgecolor','k')
    # no color-index -> set transparent
    if cindex is None:
        kwargs.setdefault('facecolor',(0.,0.,0.,0.))
    # convert mesh -> list of Polygons
    if coor is not None and conn is not None:
        poly = []
        for iconn in conn:
            poly.append(Polygon(coor[iconn,:]))
        # combine the mesh-built patches with any patches passed positionally
        # (BUGFIX: the old "tuple(poly, *args)" raised TypeError whenever "args" was non-empty,
        # because "tuple" accepts at most one argument)
        args = tuple(poly) + tuple(args)
    # convert patches -> matplotlib-objects
    p = PatchCollection(args,**kwargs)
    # add colors to patches
    if cindex is not None:
        p.set_array(cindex)
    # add patches to axis
    axis.add_collection(p)
    # rescale the axes manually
    if autoscale:
        # - get limits
        xlim = [ np.min(coor[:,0]) , np.max(coor[:,0]) ]
        ylim = [ np.min(coor[:,1]) , np.max(coor[:,1]) ]
        # - set limits +/- 10% extra margin
        axis.set_xlim([xlim[0]-.1*(xlim[1]-xlim[0]),xlim[1]+.1*(xlim[1]-xlim[0])])
        axis.set_ylim([ylim[0]-.1*(ylim[1]-ylim[0]),ylim[1]+.1*(ylim[1]-ylim[0])])
    return p
# ==================================================================================================
|
tdegeus/GooseMPL | GooseMPL/__init__.py | plot_powerlaw | python | def plot_powerlaw(exp, startx, starty, width=None, **kwargs):
r'''
Plot a power-law.
:arguments:
**exp** (``float``)
The power-law exponent.
**startx, starty** (``float``)
Start coordinates.
:options:
**width, height, endx, endy** (``float``)
Definition of the end coordinate (only on of these options is needed).
**units** ([``'relative'``] | ``'absolute'``)
The type of units in which the coordinates are specified. Relative coordinates correspond to a
fraction of the relevant axis. If you use relative coordinates, be sure to set the limits and
scale before calling this function!
**axis** ([``plt.gca()``] | ...)
Specify the axis to which to apply the limits.
...
Any ``plt.plot(...)`` option.
:returns:
The handle of the ``plt.plot(...)`` command.
'''
# get options/defaults
endx = kwargs.pop('endx' , None )
endy = kwargs.pop('endy' , None )
height = kwargs.pop('height', None )
units = kwargs.pop('units' , 'relative')
axis = kwargs.pop('axis' , plt.gca() )
# check
if axis.get_xscale() != 'log' or axis.get_yscale() != 'log':
raise IOError('This function only works on a log-log scale, where the power-law is a straight line')
# apply width/height
if width is not None:
endx = startx + width
endy = None
elif height is not None:
if exp > 0: endy = starty + height
elif exp == 0: endy = starty
else : endy = starty - height
endx = None
# transform
if units.lower() == 'relative':
[startx, endx] = rel2abs_x([startx, endx], axis)
[starty, endy] = rel2abs_y([starty, endy], axis)
# determine multiplication constant
const = starty / ( startx**exp )
# get end x/y-coordinate
if endx is not None: endy = const * endx**exp
else : endx = ( endy / const )**( 1/exp )
# plot
return axis.plot([startx, endx], [starty, endy], **kwargs) | r'''
Plot a power-law.
:arguments:
**exp** (``float``)
The power-law exponent.
**startx, starty** (``float``)
Start coordinates.
:options:
**width, height, endx, endy** (``float``)
Definition of the end coordinate (only on of these options is needed).
**units** ([``'relative'``] | ``'absolute'``)
The type of units in which the coordinates are specified. Relative coordinates correspond to a
fraction of the relevant axis. If you use relative coordinates, be sure to set the limits and
scale before calling this function!
**axis** ([``plt.gca()``] | ...)
Specify the axis to which to apply the limits.
...
Any ``plt.plot(...)`` option.
:returns:
The handle of the ``plt.plot(...)`` command. | train | https://github.com/tdegeus/GooseMPL/blob/16e1e06cbcf7131ac98c03ca7251ce83734ef905/GooseMPL/__init__.py#L630-L701 | null | '''
This module provides some extensions to matplotlib.
:dependencies:
* numpy
* matplotlib
:copyright:
| Tom de Geus
| tom@geus.me
| http://www.geus.me
'''
# ==================================================================================================
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
import os,re,sys
# ==================================================================================================
def find_latex_font_serif():
r'''
Find an available font to mimic LaTeX, and return its name.
'''
import os, re
import matplotlib.font_manager
name = lambda font: os.path.splitext(os.path.split(font)[-1])[0].split(' - ')[0]
fonts = matplotlib.font_manager.findSystemFonts(fontpaths=None, fontext='ttf')
matches = [
r'.*Computer\ Modern\ Roman.*',
r'.*CMU\ Serif.*',
r'.*CMU.*',
r'.*Times.*',
r'.*DejaVu.*',
r'.*Serif.*',
]
for match in matches:
for font in fonts:
if re.match(match,font):
return name(font)
return None
# --------------------------------------------------------------------------------------------------
def copy_style():
r'''
Write all goose-styles to the relevant matplotlib configuration directory.
'''
import os
import matplotlib
# style definitions
# -----------------
styles = {}
styles['goose.mplstyle'] = '''
figure.figsize : 8,6
font.weight : normal
font.size : 16
axes.labelsize : medium
axes.titlesize : medium
xtick.labelsize : small
ytick.labelsize : small
xtick.top : True
ytick.right : True
axes.facecolor : none
axes.prop_cycle : cycler('color',['k', 'r', 'g', 'b', 'y', 'c', 'm'])
legend.fontsize : medium
legend.fancybox : true
legend.columnspacing : 1.0
legend.handletextpad : 0.2
lines.linewidth : 2
image.cmap : afmhot
image.interpolation : nearest
image.origin : lower
savefig.facecolor : none
figure.autolayout : True
errorbar.capsize : 2
'''
styles['goose-tick-in.mplstyle'] = '''
xtick.direction : in
ytick.direction : in
'''
styles['goose-tick-lower.mplstyle'] = '''
xtick.top : False
ytick.right : False
axes.spines.top : False
axes.spines.right : False
'''
if find_latex_font_serif() is not None:
styles['goose-latex.mplstyle'] = r'''
font.family : serif
font.serif : {serif:s}
font.weight : bold
font.size : 18
text.usetex : true
text.latex.preamble : \usepackage{{amsmath}},\usepackage{{amsfonts}},\usepackage{{amssymb}},\usepackage{{bm}}
'''.format(serif=find_latex_font_serif())
else:
styles['goose-latex.mplstyle'] = r'''
font.family : serif
font.weight : bold
font.size : 18
text.usetex : true
text.latex.preamble : \usepackage{{amsmath}},\usepackage{{amsfonts}},\usepackage{{amssymb}},\usepackage{{bm}}
'''
# write style definitions
# -----------------------
# directory name where the styles are stored
dirname = os.path.abspath(os.path.join(matplotlib.get_configdir(), 'stylelib'))
# make directory if it does not yet exist
if not os.path.isdir(dirname): os.makedirs(dirname)
# write all styles
for fname, style in styles.items():
open(os.path.join(dirname, fname),'w').write(style)
# ==================================================================================================
def set_decade_lims(axis=None,direction=None):
r'''
Set limits the the floor/ceil values in terms of decades.
:options:
**axis** ([``plt.gca()``] | ...)
Specify the axis to which to apply the limits.
**direction** ([``None``] | ``'x'`` | ``'y'``)
Limit the application to a certain direction (default: both).
'''
# get current axis
if axis is None:
axis = plt.gca()
# x-axis
if direction is None or direction == 'x':
# - get current limits
MIN,MAX = axis.get_xlim()
# - floor/ceil to full decades
MIN = 10 ** ( np.floor(np.log10(MIN)) )
MAX = 10 ** ( np.ceil (np.log10(MAX)) )
# - apply
axis.set_xlim([MIN,MAX])
# y-axis
if direction is None or direction == 'y':
# - get current limits
MIN,MAX = axis.get_ylim()
# - floor/ceil to full decades
MIN = 10 ** ( np.floor(np.log10(MIN)) )
MAX = 10 ** ( np.ceil (np.log10(MAX)) )
# - apply
axis.set_ylim([MIN,MAX])
# ==================================================================================================
def scale_lim(lim,factor=1.05):
r'''
Scale limits to be 5% wider, to have a nice plot.
:arguments:
**lim** (``<list>`` | ``<str>``)
The limits. May be a string "[...,...]", which is converted to a list.
:options:
**factor** ([``1.05``] | ``<float>``)
Scale factor.
'''
# convert string "[...,...]"
if type(lim) == str: lim = eval(lim)
# scale limits
D = lim[1] - lim[0]
lim[0] -= (factor-1.)/2. * D
lim[1] += (factor-1.)/2. * D
return lim
# ==================================================================================================
def abs2rel_x(x, axis=None):
r'''
Transform absolute x-coordinates to relative x-coordinates. Relative coordinates correspond to a
fraction of the relevant axis. Be sure to set the limits and scale before calling this function!
:arguments:
**x** (``float``, ``list``)
Absolute coordinates.
:options:
**axis** ([``plt.gca()``] | ...)
Specify the axis to which to apply the limits.
:returns:
**x** (``float``, ``list``)
Relative coordinates.
'''
# get current axis
if axis is None:
axis = plt.gca()
# get current limits
xmin, xmax = axis.get_xlim()
# transform
# - log scale
if axis.get_xscale() == 'log':
try : return [(np.log10(i)-np.log10(xmin))/(np.log10(xmax)-np.log10(xmin)) if i is not None else i for i in x]
except: return (np.log10(x)-np.log10(xmin))/(np.log10(xmax)-np.log10(xmin))
# - normal scale
else:
try : return [(i-xmin)/(xmax-xmin) if i is not None else i for i in x]
except: return (x-xmin)/(xmax-xmin)
# ==================================================================================================
def abs2rel_y(y, axis=None):
r'''
Transform absolute y-coordinates to relative y-coordinates. Relative coordinates correspond to a
fraction of the relevant axis. Be sure to set the limits and scale before calling this function!
:arguments:
**y** (``float``, ``list``)
Absolute coordinates.
:options:
**axis** ([``plt.gca()``] | ...)
Specify the axis to which to apply the limits.
:returns:
**y** (``float``, ``list``)
Relative coordinates.
'''
# get current axis
if axis is None:
axis = plt.gca()
# get current limits
ymin, ymax = axis.get_ylim()
# transform
# - log scale
if axis.get_xscale() == 'log':
try : return [(np.log10(i)-np.log10(ymin))/(np.log10(ymax)-np.log10(ymin)) if i is not None else i for i in y]
except: return (np.log10(y)-np.log10(ymin))/(np.log10(ymax)-np.log10(ymin))
# - normal scale
else:
try : return [(i-ymin)/(ymax-ymin) if i is not None else i for i in y]
except: return (y-ymin)/(ymax-ymin)
# ==================================================================================================
def rel2abs_x(x, axis=None):
r'''
Transform relative x-coordinates to absolute x-coordinates. Relative coordinates correspond to a
fraction of the relevant axis. Be sure to set the limits and scale before calling this function!
:arguments:
**x** (``float``, ``list``)
Relative coordinates.
:options:
**axis** ([``plt.gca()``] | ...)
Specify the axis to which to apply the limits.
:returns:
**x** (``float``, ``list``)
Absolute coordinates.
'''
# get current axis
if axis is None:
axis = plt.gca()
# get current limits
xmin, xmax = axis.get_xlim()
# transform
# - log scale
if axis.get_xscale() == 'log':
try : return [10.**(np.log10(xmin)+i*(np.log10(xmax)-np.log10(xmin))) if i is not None else i for i in x]
except: return 10.**(np.log10(xmin)+x*(np.log10(xmax)-np.log10(xmin)))
# - normal scale
else:
try : return [xmin+i*(xmax-xmin) if i is not None else i for i in x]
except: return xmin+x*(xmax-xmin)
# ==================================================================================================
def rel2abs_y(y, axis=None):
r'''
Transform relative y-coordinates to absolute y-coordinates. Relative coordinates correspond to a
fraction of the relevant axis. Be sure to set the limits and scale before calling this function!
:arguments:
**y** (``float``, ``list``)
Relative coordinates.
:options:
**axis** ([``plt.gca()``] | ...)
Specify the axis to which to apply the limits.
:returns:
**y** (``float``, ``list``)
Absolute coordinates.
'''
# get current axis
if axis is None:
axis = plt.gca()
# get current limits
ymin, ymax = axis.get_ylim()
# transform
# - log scale
if axis.get_xscale() == 'log':
try : return [10.**(np.log10(ymin)+i*(np.log10(ymax)-np.log10(ymin))) if i is not None else i for i in y]
except: return 10.**(np.log10(ymin)+y*(np.log10(ymax)-np.log10(ymin)))
# - normal scale
else:
try : return [ymin+i*(ymax-ymin) if i is not None else i for i in y]
except: return ymin+y*(ymax-ymin)
# ==================================================================================================
def subplots(scale_x=None, scale_y=None, scale=None, **kwargs):
r'''
Run ``matplotlib.pyplot.subplots`` with ``figsize`` set to the correct multiple of the default.
:additional options:
**scale, scale_x, scale_y** (``<float>``)
Scale the figure-size (along one of the dimensions).
'''
if 'figsize' in kwargs: return plt.subplots(**kwargs)
width, height = mpl.rcParams['figure.figsize']
if scale is not None:
width *= scale
height *= scale
if scale_x is not None:
width *= scale_x
if scale_y is not None:
height *= scale_y
nrows = kwargs.pop('nrows', 1)
ncols = kwargs.pop('ncols', 1)
width = ncols * width
height = nrows * height
return plt.subplots(nrows=nrows, ncols=ncols, figsize=(width,height), **kwargs)
# ==================================================================================================
def plot(x, y, units='absolute', axis=None, **kwargs):
r'''
Plot.
:arguments:
**x, y** (``list``)
Coordinates.
:options:
**units** ([``'absolute'``] | ``'relative'``)
The type of units in which the coordinates are specified. Relative coordinates correspond to a
fraction of the relevant axis. If you use relative coordinates, be sure to set the limits and
scale before calling this function!
...
Any ``plt.plot(...)`` option.
:returns:
The handle of the ``plt.plot(...)`` command.
'''
# get current axis
if axis is None:
axis = plt.gca()
# transform
if units.lower() == 'relative':
x = rel2abs_x(x, axis)
y = rel2abs_y(y, axis)
# plot
return axis.plot(x, y, **kwargs)
# ==================================================================================================
def text(x, y, text, units='absolute', axis=None, **kwargs):
r'''
Plot a text.
:arguments:
**x, y** (``float``)
Coordinates.
**text** (``str``)
Text to plot.
:options:
**units** ([``'absolute'``] | ``'relative'``)
The type of units in which the coordinates are specified. Relative coordinates correspond to a
fraction of the relevant axis. If you use relative coordinates, be sure to set the limits and
scale before calling this function!
...
Any ``plt.text(...)`` option.
:returns:
The handle of the ``plt.text(...)`` command.
'''
# get current axis
if axis is None:
axis = plt.gca()
# transform
if units.lower() == 'relative':
x = rel2abs_x(x, axis)
y = rel2abs_y(y, axis)
# plot
return axis.text(x, y, text, **kwargs)
# ==================================================================================================
def diagonal_powerlaw(exp, ll=None, lr=None, tl=None, tr=None, width=None, height=None, plot=False, **kwargs):
    r'''
    Set the axis limits such that a power-law with a certain exponent lies on the diagonal.

    :arguments:

        **exp** (``<float>``)
            The power-law exponent.

        **ll, lr, tl, tr** (``<list>``)
            Coordinates of the lower-left, or the lower-right, or the top-left, or the top-right
            corner (specify exactly one of the four).

        **width, height** (``<float>``)
            Width or the height (specify exactly one of the two).

    :options:

        **axis** ([``plt.gca()``] | ...)
            Specify the axis to which to apply the limits.

        **plot** ([``False``] | ``True``)
            Plot the diagonal.

        ...
            Any ``plt.plot(...)`` option.

    :returns:

        The handle of the ``plt.plot(...)`` command (if any).
    '''
    axis = kwargs.pop('axis', plt.gca())
    # exactly one of "width"/"height" must be given; convert it to log-coordinates
    # (natural log is used consistently with np.exp below, so the base cancels out)
    if width and not height: width = np.log(width )
    elif height and not width : height = np.log(height)
    else: raise IOError('Specify "width" or "height"')
    # exactly one corner must be given; convert it to log-coordinates as well
    if ll and not lr and not tl and not tr: ll = [ np.log(ll[0]), np.log(ll[1]) ]
    elif lr and not ll and not tl and not tr: lr = [ np.log(lr[0]), np.log(lr[1]) ]
    elif tl and not lr and not ll and not tr: tl = [ np.log(tl[0]), np.log(tl[1]) ]
    elif tr and not lr and not tl and not ll: tr = [ np.log(tr[0]), np.log(tr[1]) ]
    else: raise IOError('Specify "ll" or "lr" or "tl" or "tr"')
    # the power-law is a straight line only on a log-log scale
    axis.set_xscale('log')
    axis.set_yscale('log')
    # the slope |exp| fixes the aspect ratio of the (log-space) window
    if width : height = width * np.abs(exp)
    elif height: width = height / np.abs(exp)
    # set the limits from the given corner and the window size
    # ("sorted" guards the (min, max) order; np.exp maps back from log-coordinates)
    if ll:
        axis.set_xlim(sorted([np.exp(ll[0]), np.exp(ll[0]+width )]))
        axis.set_ylim(sorted([np.exp(ll[1]), np.exp(ll[1]+height)]))
    elif lr:
        axis.set_xlim(sorted([np.exp(lr[0]), np.exp(lr[0]-width )]))
        axis.set_ylim(sorted([np.exp(lr[1]), np.exp(lr[1]+height)]))
    elif tl:
        axis.set_xlim(sorted([np.exp(tl[0]), np.exp(tl[0]+width )]))
        axis.set_ylim(sorted([np.exp(tl[1]), np.exp(tl[1]-height)]))
    elif tr:
        axis.set_xlim(sorted([np.exp(tr[0]), np.exp(tr[0]-width )]))
        axis.set_ylim(sorted([np.exp(tr[1]), np.exp(tr[1]-height)]))
    # optionally draw the diagonal (in relative coordinates, spanning the whole window)
    if plot:
        if exp > 0: return plot_powerlaw(exp, 0., 0., 1., **kwargs)
        else : return plot_powerlaw(exp, 0., 1., 1., **kwargs)
# ==================================================================================================
def annotate_powerlaw(text, exp, startx, starty, width=None, rx=0.5, ry=0.5, **kwargs):
    r'''
    Add a label to the middle of a power-law annotation (see ``goosempl.plot_powerlaw``).

    :arguments:

        **text** (``str``)
            Text of the label.

        **exp** (``float``)
            The power-law exponent.

        **startx, starty** (``float``)
            Start coordinates.

    :options:

        **width, height, endx, endy** (``float``)
            Definition of the end coordinate (only one of these options is needed).

        **rx, ry** (``float``)
            Shift in x- and y-direction w.r.t. the default (mid-point) coordinates.

        **units** ([``'relative'``] | ``'absolute'``)
            The type of units in which the coordinates are specified. Relative coordinates
            correspond to a fraction of the relevant axis. If you use relative coordinates,
            be sure to set the limits and scale before calling this function!

        **axis** ([``plt.gca()``] | ...)
            Specify the axis to which to apply the limits.

        ...
            Any ``plt.text(...)`` option.

    :returns:

        The handle of the ``plt.text(...)`` command.
    '''
    # get options/defaults
    endx = kwargs.pop('endx' , None )
    endy = kwargs.pop('endy' , None )
    height = kwargs.pop('height', None )
    units = kwargs.pop('units' , 'relative')
    axis = kwargs.pop('axis' , plt.gca() )
    # check: the power-law is a straight line only on a log-log scale
    if axis.get_xscale() != 'log' or axis.get_yscale() != 'log':
        raise IOError('This function only works on a log-log scale, where the power-law is a straight line')
    # apply width/height: reduce to a single end-coordinate in one direction
    # (note that "width"/"height" take precedence over "endx"/"endy")
    if width is not None:
        endx = startx + width
        endy = None
    elif height is not None:
        if exp > 0: endy = starty + height
        elif exp == 0: endy = starty
        else : endy = starty - height
        endx = None
    # transform relative (fraction-of-axis) -> absolute (data) coordinates
    if units.lower() == 'relative':
        [startx, endx] = rel2abs_x([startx, endx], axis)
        [starty, endy] = rel2abs_y([starty, endy], axis)
    # determine multiplication constant: y = const * x**exp through the start point
    # NOTE(review): startx == 0 (or endy/exp combinations crossing zero) would divide
    # by zero here -- presumably excluded by the log-log scale; confirm
    const = starty / ( startx**exp )
    # get end x/y-coordinate from the power-law through the start point
    if endx is not None: endy = const * endx**exp
    else : endx = ( endy / const )**( 1/exp )
    # mid-point on the log-log plot (geometric mean), shifted by the fractions (rx, ry)
    x = 10. ** ( np.log10(startx) + rx * ( np.log10(endx) - np.log10(startx) ) )
    y = 10. ** ( np.log10(starty) + ry * ( np.log10(endy) - np.log10(starty) ) )
    # plot
    return axis.text(x, y, text, **kwargs)
# ==================================================================================================
# ==================================================================================================
def grid_powerlaw(exp, insert=0, skip=0, end=-1, step=0, axis=None, **kwargs):
    r'''
    Draw a power-law grid: a grid that respects a certain power-law exponent. The grid-lines start
    from the positions of the ticks.

    :arguments:

        **exp** (``float``)
            The power-law exponent.

    :options:

        **insert** (``<int>``)
            Insert extra lines in between the default lines set by the tick positions.

        **skip, end, step** (``<int>``)
            Select from the lines based on ``coor = coor[skip:end:step]``.

        **axis** ([``plt.gca()``] | ...)
            Specify the axis to which to apply the limits.

        ...
            Any ``plt.plot(...)`` option.

    :returns:

        The handle of the ``plt.plot(...)`` command.
    '''
    # default axis
    if axis is None: axis = plt.gca()
    # default plot settings
    kwargs.setdefault('color' , 'k' )
    kwargs.setdefault('linestyle', '--')
    kwargs.setdefault('linewidth', 1 )
    # check: the power-law is a straight line only on a log-log scale
    if axis.get_xscale() != 'log' or axis.get_yscale() != 'log':
        raise IOError('This function only works on a log-log scale, where the power-law is a straight line')
    # zero-exponent: draw horizontal lines
    if exp == 0:
        # y-coordinate of the start positions (as fraction of the axis)
        starty = abs2rel_y(axis.get_yticks(), axis=axis)
        # insert extra coordinates by linear interpolation between the tick positions
        if insert > 0:
            n = len(starty)
            x = np.linspace(0,1,n+(n-1)*int(insert))
            xp = np.linspace(0,1,n)
            starty = np.interp(x, xp, starty)
        # skip coordinates
        starty = starty[int(skip):int(end):int(1+step)]
        # set remaining coordinates: lines span the full x-range (relative coords 0..1)
        endy = starty
        startx = np.zeros((len(starty)))
        endx = np.ones ((len(starty)))
    # all other exponents
    else:
        # get the axis' size in real coordinates
        # - get the limits
        xmin, xmax = axis.get_xlim()
        ymin, ymax = axis.get_ylim()
        # - compute the size in both directions (in decades)
        deltax = np.log10(xmax) - np.log10(xmin)
        deltay = np.log10(ymax) - np.log10(ymin)
        # convert the exponent in real coordinates to an exponent in relative coordinates
        b = np.abs(exp) * deltax / deltay
        # x-coordinate of the start positions (as fraction of the axis)
        startx = abs2rel_x(axis.get_xticks(), axis=axis)
        # compute how many lines need to be prepended: lines that start left of the
        # visible window but still cross it (a line spans 1/b in relative x)
        Dx = startx[1] - startx[0]
        nneg = int(np.floor(1./(b*Dx))) - 1
        # add extra to be sure
        if insert > 0:
            nneg += 1
        # prepend
        if nneg > 0:
            startx = np.hstack(( startx[0]+np.cumsum(-Dx * np.ones((nneg)))[::-1], startx ))
        # insert extra coordinates by linear interpolation between the tick positions
        if insert > 0:
            n = len(startx)
            x = np.linspace(0,1,n+(n-1)*int(insert))
            xp = np.linspace(0,1,n)
            startx = np.interp(x, xp, startx)
        # skip coordinates
        # NOTE(review): unlike the "exp == 0" branch, "end" is ignored here and "skip"
        # is only honoured when "step > 0" -- confirm this asymmetry is intended
        if step > 0:
            startx = startx[int(skip)::int(1+step)]
        # x-coordinate of the end of the lines (slope "b" in relative coordinates)
        endx = startx + 1/b
        # y-coordinate of the start and the end of the lines: bottom-to-top for a
        # positive exponent, top-to-bottom for a negative one
        if exp > 0:
            starty = np.zeros((len(startx)))
            endy = np.ones ((len(startx)))
        else:
            starty = np.ones ((len(startx)))
            endy = np.zeros((len(startx)))
    # convert relative (fraction-of-axis) -> real (data) coordinates
    startx = rel2abs_x(startx, axis)
    endx = rel2abs_x(endx , axis)
    starty = rel2abs_y(starty, axis)
    endy = rel2abs_y(endy , axis)
    # plot: each column of the stacked arrays is one grid-line
    lines = axis.plot(np.vstack(( startx, endx )), np.vstack(( starty, endy )), **kwargs)
    # suppress excess legend entries: keep only the first line's label
    plt.setp(lines[1:], label="_")
    # return handles
    return lines
# ==================================================================================================
def histogram_bin_edges_minwidth(min_width, bins):
    r'''
    Merge bins with their right-neighbour until each bin is at least ``min_width`` wide.

    :arguments:

        **bins** (``<array_like>``)
            The bin-edges.

        **min_width** (``<float>``)
            The minimum bin width.
    '''
    # nothing to do
    if min_width is None or min_width is False:
        return bins
    # merge too-narrow bins one at a time, re-measuring after every merge
    while True:
        narrow = np.where(np.diff(bins) < min_width)[0]
        # all bins wide enough: done
        if narrow.size == 0:
            return bins
        i = narrow[0]
        # last bin: merge to the left (drop its left edge)
        if i == len(bins) - 2:
            bins = np.hstack(( bins[:i], bins[-1] ))
        # otherwise: merge to the right (drop the shared edge)
        else:
            bins = np.hstack(( bins[:(i+1)], bins[(i+2):] ))
# ==================================================================================================
def histogram_bin_edges_mincount(data, min_count, bins):
    r'''
    Merge bins with their right-neighbour until each bin holds at least ``min_count``
    data-points.

    :arguments:

        **data** (``<array_like>``)
            Input data. The histogram is computed over the flattened array.

        **bins** (``<array_like>`` | ``<int>``)
            The bin-edges (or the number of bins, automatically converted to equal-sized bins).

        **min_count** (``<int>``)
            The minimum number of data-points per bin.
    '''
    # nothing to do
    if min_count is None or min_count is False:
        return bins
    # input check
    if type(min_count) != int:
        raise IOError('"min_count" must be an integer number')
    # merge under-filled bins one at a time, re-binning after every merge
    while True:
        count, _ = np.histogram(data, bins=bins, density=False)
        low = np.where(count < min_count)[0]
        # all bins sufficiently filled: done
        if low.size == 0:
            return bins
        i = low[0]
        # last bin: merge to the left (drop its left edge)
        if i == len(count) - 1:
            bins = np.hstack(( bins[:i], bins[-1] ))
        # otherwise: merge to the right (drop the shared edge)
        else:
            bins = np.hstack(( bins[:(i+1)], bins[(i+2):] ))
# ==================================================================================================
def histogram_bin_edges(data, bins=10, mode='equal', min_count=None, integer=False, remove_empty_edges=True, min_width=None):
    r'''
    Determine bin-edges.

    :arguments:

        **data** (``<array_like>``)
            Input data. The histogram is computed over the flattened array.

    :options:

        **bins** ([``10``] | ``<int>``)
            The number of bins.

        **mode** ([``'equal'``] | ``<str>``)
            Mode with which to compute the bin-edges:
            * ``'equal'``: each bin has equal width.
            * ``'log'``: logarithmic spacing.
            * ``'uniform'``: uniform number of data-points per bin.

        **min_count** (``<int>``)
            The minimum number of data-points per bin.

        **min_width** (``<float>``)
            The minimum width of each bin.

        **integer** ([``False``] | ``True``)
            If ``True``, bins not encompassing an integer are removed
            (e.g. a bin with edges ``[1.1, 1.9]`` is removed, but ``[0.9, 1.1]`` is not removed).

        **remove_empty_edges** ([``True``] | ``False``)
            Remove empty bins at the beginning or the end.

    :returns:

        **bin_edges** (``<array of dtype float>``)
            The edges to pass into histogram.
    '''
    # determine the bin-edges
    if mode == 'equal':
        bin_edges = np.linspace(np.min(data), np.max(data), bins+1)
    elif mode == 'log':
        bin_edges = np.logspace(np.log10(np.min(data)), np.log10(np.max(data)), bins+1)
    elif mode == 'uniform':
        # - an explicit list of edges makes no sense for uniform filling
        if hasattr(bins, "__len__"):
            raise IOError('Only the number of bins can be specified')
        # - use the minimum count to estimate the number of bins
        if min_count is not None and min_count is not False:
            if type(min_count) != int:
                raise IOError('"min_count" must be an integer number')
            bins = int(np.floor(float(len(data)) / float(min_count)))
        # - number of data-points in each bin (equal for each)
        count = int(np.floor(float(len(data)) / float(bins))) * np.ones(bins, dtype='int')
        # - increase the number of data-points by one in as many bins as needed,
        #   such that the total fits the total number of data-points
        #   (BUGFIX: use the builtin "int" -- the alias "np.int" was removed in NumPy >= 1.24)
        count[np.linspace(0, bins-1, len(data)-np.sum(count)).astype(int)] += 1
        # - split the data: cumulative counts give the indices of the edges
        idx = np.empty((bins+1), dtype='int')
        idx[0 ] = 0
        idx[1:] = np.cumsum(count)
        idx[-1] = len(data) - 1
        # - determine the bin-edges ("unique" collapses duplicate data values)
        bin_edges = np.unique(np.sort(data)[idx])
    else:
        raise IOError('Unknown option')
    # remove empty starting and ending bin (related to an unfortunate choice of bin-edges)
    if remove_empty_edges:
        N, _ = np.histogram(data, bins=bin_edges, density=False)
        idx = np.min(np.where(N > 0)[0])
        jdx = np.max(np.where(N > 0)[0])
        bin_edges = bin_edges[(idx):(jdx+2)]
    # merge bins with too few data-points (if needed)
    bin_edges = histogram_bin_edges_mincount(data, min_count=min_count, bins=bin_edges)
    # merge bins that have too small of a width
    bin_edges = histogram_bin_edges_minwidth(min_width=min_width, bins=bin_edges)
    # select only bins that encompass an integer (and retain the original bounds)
    if integer:
        idx = np.where(np.diff(np.floor(bin_edges)) >= 1)[0]
        idx = np.unique(np.hstack((0, idx, len(bin_edges)-1)))
        bin_edges = bin_edges[idx]
    # return
    return bin_edges
# ==================================================================================================
def histogram(data, return_edges=True, **kwargs):
    r'''
    Compute a histogram: thin wrapper around ``numpy.histogram``.

    See `numpy.histogram <https://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html>`_

    :extra options:

        **return_edges** ([``True``] | ``False``)
            Return the bin edges if set to ``True``, return their midpoints otherwise.
    '''
    # delegate the actual binning to NumPy
    counts, edges = np.histogram(data, **kwargs)
    if return_edges:
        return counts, edges
    # convert the edges to the midpoint of each bin
    midpoints = np.diff(edges) / 2. + edges[:-1]
    return counts, midpoints
# ==================================================================================================
def histogram_cumulative(data, **kwargs):
    r'''
    Compute cumulative histogram.

    See `numpy.histogram <https://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html>`_

    :extra options:

        **return_edges** ([``True``] | ``False``)
            Return the bin edges if set to ``True``, return their midpoints otherwise.

        **normalize** ([``False``] | ``True``)
            Normalize such that the final probability is one. In this case the function returns
            the (binned) cumulative probability density.
    '''
    # pop the wrapper-specific options before forwarding to NumPy
    return_edges = kwargs.pop('return_edges', True)
    normalize = kwargs.pop('normalize', False)
    counts, edges = np.histogram(data, **kwargs)
    # accumulate the per-bin counts
    cum = np.cumsum(counts)
    if normalize:
        cum = cum / cum[-1]
    # optionally convert the edges to the midpoint of each bin
    if not return_edges:
        edges = np.diff(edges) / 2. + edges[:-1]
    return cum, edges
# ==================================================================================================
def hist(P, edges, **kwargs):
    r'''
    Plot a histogram as a collection of rectangular patches (one per bin).

    :arguments:

        **P** (``<array_like>``)
            Count (or density) per bin.

        **edges** (``<array_like>``)
            The bin-edges (one more entry than ``P``).

    :options:

        **axis** ([``plt.gca()``] | ...)
            Specify the axis to plot in.

        **cindex** (``<numpy.ndarray>``)
            Array with, for each patch, the value that should be indexed to a color.

        **autoscale** ([``True``] | ``False``)
            Automatically update the limits of the plot.

        ...
            Any ``matplotlib.collections.PatchCollection`` option.

    :returns:

        The handle of the ``PatchCollection``.
    '''
    from matplotlib.collections import PatchCollection
    from matplotlib.patches import Polygon
    # extract local options
    # NOTE(review): "plt.gca()" is evaluated eagerly as the pop-default, so a figure may
    # be created even when "axis" is supplied -- confirm this is intended
    axis = kwargs.pop( 'axis' , plt.gca() )
    cindex = kwargs.pop( 'cindex' , None )
    autoscale = kwargs.pop( 'autoscale' , True )
    # set defaults
    kwargs.setdefault('edgecolor','k')
    # no color-index -> set transparent
    if cindex is None:
        kwargs.setdefault('facecolor',(0.,0.,0.,0.))
    # convert -> list of Polygons: one axis-aligned rectangle per bin, from y=0 up to P
    poly = []
    for p, xl, xu in zip(P, edges[:-1], edges[1:]):
        coor = np.array([
            [xl, 0.],
            [xu, 0.],
            [xu, p ],
            [xl, p ],
        ])
        poly.append(Polygon(coor))
    args = (poly)  # NB: parentheses do not make a tuple -- "args" is simply the list
    # convert patches -> matplotlib-objects
    p = PatchCollection(args,**kwargs)
    # add colors to patches
    if cindex is not None:
        p.set_array(cindex)
    # add patches to axis
    axis.add_collection(p)
    # rescale the axes manually (automatic limits of Collections are not supported)
    if autoscale:
        # - get limits
        xlim = [ edges[0], edges[-1] ]
        ylim = [ 0 , np.max(P) ]
        # - set limits +/- 10% extra margin
        axis.set_xlim([xlim[0]-.1*(xlim[1]-xlim[0]),xlim[1]+.1*(xlim[1]-xlim[0])])
        axis.set_ylim([ylim[0]-.1*(ylim[1]-ylim[0]),ylim[1]+.1*(ylim[1]-ylim[0])])
    return p
# ==================================================================================================
def cdf(data, mode='continuous', **kwargs):
    '''
    Return the empirical cumulative density of the input data.

    :arguments:

        **data** (``<numpy.ndarray>``)
            Input data, to plot the distribution for.

    :returns:

        **P** (``<numpy.ndarray>``)
            Cumulative probability (evenly spaced on [0, 1]).

        **x** (``<numpy.ndarray>``)
            Data points (sorted ascending).
    '''
    # NB: "mode" and any extra keyword arguments are accepted but currently unused
    x = np.sort(data)
    P = np.linspace(0.0, 1.0, len(data))
    return (P, x)
# ==================================================================================================
def patch(*args, **kwargs):
    '''
    Add patches to plot. The color of the patches is indexed according to a specified color-index.

    :example:

        Plot a finite element mesh: the outline of the undeformed configuration, and the deformed
        configuration for which the elements get a color e.g. based on stress::

            import matplotlib.pyplot as plt
            import goosempl as gplt

            fig,ax = plt.subplots()

            p = gplt.patch(coor=coor+disp,conn=conn,axis=ax,cindex=stress,cmap='YlOrRd',edgecolor=None)
            _ = gplt.patch(coor=coor     ,conn=conn,axis=ax)

            cbar = fig.colorbar(p,axis=ax,aspect=10)

            plt.show()

    :arguments - option 1/2:

        **patches** (``<list>``)
            List with patch objects. Can be replaced by specifying ``coor`` and ``conn``.

    :arguments - option 2/2:

        **coor** (``<numpy.ndarray>`` | ``<list>`` (nested))
            Matrix with on each row the coordinates (positions) of each node.

        **conn** (``<numpy.ndarray>`` | ``<list>`` (nested))
            Matrix with on each row the node numbers (rows in ``coor``) which form an element (patch).

    :options:

        **cindex** (``<numpy.ndarray>``)
            Array with, for each patch, the value that should be indexed to a color.

        **axis** (``<matplotlib>``)
            Specify an axis to include to plot in. By default the current axis is used.

        **autoscale** ([``True``] | ``False``)
            Automatically update the limits of the plot (currently automatic limits of Collections
            are not supported by matplotlib). Only possible when ``coor`` is specified.

    :recommended options:

        **cmap** (``<str>`` | ...)
            Specify a colormap.

        **linewidth** (``<float>``)
            Width of the edges.

        **edgecolor** (``<str>`` | ...)
            Color of the edges.

        **clim** (``(<float>,<float>)``)
            Lower and upper limit of the color-axis.

    :returns:

        **handle** (``<matplotlib>``)
            Handle of the patch objects.

    .. seealso::

        * `matplotlib example
          <http://matplotlib.org/examples/api/patch_collection.html>`_.
    '''
    from matplotlib.collections import PatchCollection
    from matplotlib.patches import Polygon
    # extract local options (pop them before "kwargs" is forwarded to PatchCollection)
    # NOTE: resolve the default axis lazily, so that no figure is created when "axis" is given
    axis = kwargs.pop('axis', None)
    cindex = kwargs.pop('cindex', None)
    coor = kwargs.pop('coor', None)
    conn = kwargs.pop('conn', None)
    autoscale = kwargs.pop('autoscale', True)
    if axis is None:
        axis = plt.gca()
    # input check: either a list of patches is given positionally, or both "coor" and "conn"
    # (BUGFIX: previously the positional "patches" variant, although documented, always raised)
    if len(args) == 0 and (coor is None or conn is None):
        raise IOError('Specify both "coor" and "conn"')
    # set defaults
    kwargs.setdefault('edgecolor', 'k')
    # no color-index -> set transparent
    if cindex is None:
        kwargs.setdefault('facecolor', (0., 0., 0., 0.))
    # convert mesh -> list of Polygons (one per row of the connectivity)
    if coor is not None and conn is not None:
        patches = [Polygon(coor[iconn, :]) for iconn in conn]
    # patches supplied positionally
    # (BUGFIX: was "tuple(poly, *args)", which raised TypeError for any positional argument)
    else:
        patches = args[0]
    # convert patches -> matplotlib-objects
    p = PatchCollection(patches, **kwargs)
    # add colors to patches
    if cindex is not None:
        p.set_array(cindex)
    # add patches to axis
    axis.add_collection(p)
    # rescale the axes manually (only possible when the nodal coordinates are known)
    if autoscale and coor is not None:
        # - get limits
        xlim = [np.min(coor[:, 0]), np.max(coor[:, 0])]
        ylim = [np.min(coor[:, 1]), np.max(coor[:, 1])]
        # - set limits +/- 10% extra margin
        axis.set_xlim([xlim[0]-.1*(xlim[1]-xlim[0]), xlim[1]+.1*(xlim[1]-xlim[0])])
        axis.set_ylim([ylim[0]-.1*(ylim[1]-ylim[0]), ylim[1]+.1*(ylim[1]-ylim[0])])
    return p
# ==================================================================================================
|
tdegeus/GooseMPL | GooseMPL/__init__.py | grid_powerlaw | python | def grid_powerlaw(exp, insert=0, skip=0, end=-1, step=0, axis=None, **kwargs):
r'''
Draw a power-law grid: a grid that respects a certain power-law exponent. The grid-lines start from
the positions of the ticks.
:arguments:
**exp** (``float``)
The power-law exponent.
:options:
**insert** (``<int>``)
Insert extra lines in between the default lines set by the tick positions.
**skip, end, step** (``<int>``)
Select from the lines based on ``coor = coor[skip:end:step]``.
**axis** ([``plt.gca()``] | ...)
Specify the axis to which to apply the limits.
...
Any ``plt.plot(...)`` option.
:returns:
The handle of the ``plt.plot(...)`` command.
'''
# default axis
if axis is None: axis = plt.gca()
# default plot settings
kwargs.setdefault('color' , 'k' )
kwargs.setdefault('linestyle', '--')
kwargs.setdefault('linewidth', 1 )
# check
if axis.get_xscale() != 'log' or axis.get_yscale() != 'log':
raise IOError('This function only works on a log-log scale, where the power-law is a straight line')
# zero-exponent: draw horizontal lines
if exp == 0:
# y-coordinate of the start positions
starty = abs2rel_y(axis.get_yticks(), axis=axis)
# insert extra coordinates
if insert > 0:
n = len(starty)
x = np.linspace(0,1,n+(n-1)*int(insert))
xp = np.linspace(0,1,n)
starty = np.interp(x, xp, starty)
# skip coordinates
starty = starty[int(skip):int(end):int(1+step)]
# set remaining coordinates
endy = starty
startx = np.zeros((len(starty)))
endx = np.ones ((len(starty)))
# all other exponents
else:
# get the axis' size in real coordinates
# - get the limits
xmin, xmax = axis.get_xlim()
ymin, ymax = axis.get_ylim()
# - compute the size in both directions
deltax = np.log10(xmax) - np.log10(xmin)
deltay = np.log10(ymax) - np.log10(ymin)
# convert the exponent in real coordinates to an exponent in relative coordinates
b = np.abs(exp) * deltax / deltay
# x-coordinate of the start positions
startx = abs2rel_x(axis.get_xticks(), axis=axis)
# compute how many labels need to be prepended
Dx = startx[1] - startx[0]
nneg = int(np.floor(1./(b*Dx))) - 1
# add extra to be sure
if insert > 0:
nneg += 1
# prepend
if nneg > 0:
startx = np.hstack(( startx[0]+np.cumsum(-Dx * np.ones((nneg)))[::-1], startx ))
# insert extra coordinates
if insert > 0:
n = len(startx)
x = np.linspace(0,1,n+(n-1)*int(insert))
xp = np.linspace(0,1,n)
startx = np.interp(x, xp, startx)
# skip coordinates
if step > 0:
startx = startx[int(skip)::int(1+step)]
# x-coordinate of the end of the lines
endx = startx + 1/b
# y-coordinate of the start and the end of the lines
if exp > 0:
starty = np.zeros((len(startx)))
endy = np.ones ((len(startx)))
else:
starty = np.ones ((len(startx)))
endy = np.zeros((len(startx)))
# convert to real coordinates
startx = rel2abs_x(startx, axis)
endx = rel2abs_x(endx , axis)
starty = rel2abs_y(starty, axis)
endy = rel2abs_y(endy , axis)
# plot
lines = axis.plot(np.vstack(( startx, endx )), np.vstack(( starty, endy )), **kwargs)
# remove access in labels
plt.setp(lines[1:], label="_")
# return handles
return lines | r'''
Draw a power-law grid: a grid that respects a certain power-law exponent. The grid-lines start from
the positions of the ticks.
:arguments:
**exp** (``float``)
The power-law exponent.
:options:
**insert** (``<int>``)
Insert extra lines in between the default lines set by the tick positions.
**skip, end, step** (``<int>``)
Select from the lines based on ``coor = coor[skip:end:step]``.
**axis** ([``plt.gca()``] | ...)
Specify the axis to which to apply the limits.
...
Any ``plt.plot(...)`` option.
:returns:
The handle of the ``plt.plot(...)`` command. | train | https://github.com/tdegeus/GooseMPL/blob/16e1e06cbcf7131ac98c03ca7251ce83734ef905/GooseMPL/__init__.py#L705-L836 | null | '''
This module provides some extensions to matplotlib.
:dependencies:
* numpy
* matplotlib
:copyright:
| Tom de Geus
| tom@geus.me
| http://www.geus.me
'''
# ==================================================================================================
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
import os,re,sys
# ==================================================================================================
def find_latex_font_serif():
r'''
Find an available font to mimic LaTeX, and return its name.
'''
import os, re
import matplotlib.font_manager
name = lambda font: os.path.splitext(os.path.split(font)[-1])[0].split(' - ')[0]
fonts = matplotlib.font_manager.findSystemFonts(fontpaths=None, fontext='ttf')
matches = [
r'.*Computer\ Modern\ Roman.*',
r'.*CMU\ Serif.*',
r'.*CMU.*',
r'.*Times.*',
r'.*DejaVu.*',
r'.*Serif.*',
]
for match in matches:
for font in fonts:
if re.match(match,font):
return name(font)
return None
# --------------------------------------------------------------------------------------------------
def copy_style():
r'''
Write all goose-styles to the relevant matplotlib configuration directory.
'''
import os
import matplotlib
# style definitions
# -----------------
styles = {}
styles['goose.mplstyle'] = '''
figure.figsize : 8,6
font.weight : normal
font.size : 16
axes.labelsize : medium
axes.titlesize : medium
xtick.labelsize : small
ytick.labelsize : small
xtick.top : True
ytick.right : True
axes.facecolor : none
axes.prop_cycle : cycler('color',['k', 'r', 'g', 'b', 'y', 'c', 'm'])
legend.fontsize : medium
legend.fancybox : true
legend.columnspacing : 1.0
legend.handletextpad : 0.2
lines.linewidth : 2
image.cmap : afmhot
image.interpolation : nearest
image.origin : lower
savefig.facecolor : none
figure.autolayout : True
errorbar.capsize : 2
'''
styles['goose-tick-in.mplstyle'] = '''
xtick.direction : in
ytick.direction : in
'''
styles['goose-tick-lower.mplstyle'] = '''
xtick.top : False
ytick.right : False
axes.spines.top : False
axes.spines.right : False
'''
if find_latex_font_serif() is not None:
styles['goose-latex.mplstyle'] = r'''
font.family : serif
font.serif : {serif:s}
font.weight : bold
font.size : 18
text.usetex : true
text.latex.preamble : \usepackage{{amsmath}},\usepackage{{amsfonts}},\usepackage{{amssymb}},\usepackage{{bm}}
'''.format(serif=find_latex_font_serif())
else:
styles['goose-latex.mplstyle'] = r'''
font.family : serif
font.weight : bold
font.size : 18
text.usetex : true
text.latex.preamble : \usepackage{{amsmath}},\usepackage{{amsfonts}},\usepackage{{amssymb}},\usepackage{{bm}}
'''
# write style definitions
# -----------------------
# directory name where the styles are stored
dirname = os.path.abspath(os.path.join(matplotlib.get_configdir(), 'stylelib'))
# make directory if it does not yet exist
if not os.path.isdir(dirname): os.makedirs(dirname)
# write all styles
for fname, style in styles.items():
open(os.path.join(dirname, fname),'w').write(style)
# ==================================================================================================
def set_decade_lims(axis=None,direction=None):
r'''
Set limits the the floor/ceil values in terms of decades.
:options:
**axis** ([``plt.gca()``] | ...)
Specify the axis to which to apply the limits.
**direction** ([``None``] | ``'x'`` | ``'y'``)
Limit the application to a certain direction (default: both).
'''
# get current axis
if axis is None:
axis = plt.gca()
# x-axis
if direction is None or direction == 'x':
# - get current limits
MIN,MAX = axis.get_xlim()
# - floor/ceil to full decades
MIN = 10 ** ( np.floor(np.log10(MIN)) )
MAX = 10 ** ( np.ceil (np.log10(MAX)) )
# - apply
axis.set_xlim([MIN,MAX])
# y-axis
if direction is None or direction == 'y':
# - get current limits
MIN,MAX = axis.get_ylim()
# - floor/ceil to full decades
MIN = 10 ** ( np.floor(np.log10(MIN)) )
MAX = 10 ** ( np.ceil (np.log10(MAX)) )
# - apply
axis.set_ylim([MIN,MAX])
# ==================================================================================================
def scale_lim(lim,factor=1.05):
r'''
Scale limits to be 5% wider, to have a nice plot.
:arguments:
**lim** (``<list>`` | ``<str>``)
The limits. May be a string "[...,...]", which is converted to a list.
:options:
**factor** ([``1.05``] | ``<float>``)
Scale factor.
'''
# convert string "[...,...]"
if type(lim) == str: lim = eval(lim)
# scale limits
D = lim[1] - lim[0]
lim[0] -= (factor-1.)/2. * D
lim[1] += (factor-1.)/2. * D
return lim
# ==================================================================================================
def abs2rel_x(x, axis=None):
r'''
Transform absolute x-coordinates to relative x-coordinates. Relative coordinates correspond to a
fraction of the relevant axis. Be sure to set the limits and scale before calling this function!
:arguments:
**x** (``float``, ``list``)
Absolute coordinates.
:options:
**axis** ([``plt.gca()``] | ...)
Specify the axis to which to apply the limits.
:returns:
**x** (``float``, ``list``)
Relative coordinates.
'''
# get current axis
if axis is None:
axis = plt.gca()
# get current limits
xmin, xmax = axis.get_xlim()
# transform
# - log scale
if axis.get_xscale() == 'log':
try : return [(np.log10(i)-np.log10(xmin))/(np.log10(xmax)-np.log10(xmin)) if i is not None else i for i in x]
except: return (np.log10(x)-np.log10(xmin))/(np.log10(xmax)-np.log10(xmin))
# - normal scale
else:
try : return [(i-xmin)/(xmax-xmin) if i is not None else i for i in x]
except: return (x-xmin)/(xmax-xmin)
# ==================================================================================================
def abs2rel_y(y, axis=None):
r'''
Transform absolute y-coordinates to relative y-coordinates. Relative coordinates correspond to a
fraction of the relevant axis. Be sure to set the limits and scale before calling this function!
:arguments:
**y** (``float``, ``list``)
Absolute coordinates.
:options:
**axis** ([``plt.gca()``] | ...)
Specify the axis to which to apply the limits.
:returns:
**y** (``float``, ``list``)
Relative coordinates.
'''
# get current axis
if axis is None:
axis = plt.gca()
# get current limits
ymin, ymax = axis.get_ylim()
# transform
# - log scale
if axis.get_xscale() == 'log':
try : return [(np.log10(i)-np.log10(ymin))/(np.log10(ymax)-np.log10(ymin)) if i is not None else i for i in y]
except: return (np.log10(y)-np.log10(ymin))/(np.log10(ymax)-np.log10(ymin))
# - normal scale
else:
try : return [(i-ymin)/(ymax-ymin) if i is not None else i for i in y]
except: return (y-ymin)/(ymax-ymin)
# ==================================================================================================
def rel2abs_x(x, axis=None):
r'''
Transform relative x-coordinates to absolute x-coordinates. Relative coordinates correspond to a
fraction of the relevant axis. Be sure to set the limits and scale before calling this function!
:arguments:
**x** (``float``, ``list``)
Relative coordinates.
:options:
**axis** ([``plt.gca()``] | ...)
Specify the axis to which to apply the limits.
:returns:
**x** (``float``, ``list``)
Absolute coordinates.
'''
# get current axis
if axis is None:
axis = plt.gca()
# get current limits
xmin, xmax = axis.get_xlim()
# transform
# - log scale
if axis.get_xscale() == 'log':
try : return [10.**(np.log10(xmin)+i*(np.log10(xmax)-np.log10(xmin))) if i is not None else i for i in x]
except: return 10.**(np.log10(xmin)+x*(np.log10(xmax)-np.log10(xmin)))
# - normal scale
else:
try : return [xmin+i*(xmax-xmin) if i is not None else i for i in x]
except: return xmin+x*(xmax-xmin)
# ==================================================================================================
def rel2abs_y(y, axis=None):
r'''
Transform relative y-coordinates to absolute y-coordinates. Relative coordinates correspond to a
fraction of the relevant axis. Be sure to set the limits and scale before calling this function!
:arguments:
**y** (``float``, ``list``)
Relative coordinates.
:options:
**axis** ([``plt.gca()``] | ...)
Specify the axis to which to apply the limits.
:returns:
**y** (``float``, ``list``)
Absolute coordinates.
'''
# get current axis
if axis is None:
axis = plt.gca()
# get current limits
ymin, ymax = axis.get_ylim()
# transform
# - log scale
if axis.get_xscale() == 'log':
try : return [10.**(np.log10(ymin)+i*(np.log10(ymax)-np.log10(ymin))) if i is not None else i for i in y]
except: return 10.**(np.log10(ymin)+y*(np.log10(ymax)-np.log10(ymin)))
# - normal scale
else:
try : return [ymin+i*(ymax-ymin) if i is not None else i for i in y]
except: return ymin+y*(ymax-ymin)
# ==================================================================================================
def subplots(scale_x=None, scale_y=None, scale=None, **kwargs):
r'''
Run ``matplotlib.pyplot.subplots`` with ``figsize`` set to the correct multiple of the default.
:additional options:
**scale, scale_x, scale_y** (``<float>``)
Scale the figure-size (along one of the dimensions).
'''
if 'figsize' in kwargs: return plt.subplots(**kwargs)
width, height = mpl.rcParams['figure.figsize']
if scale is not None:
width *= scale
height *= scale
if scale_x is not None:
width *= scale_x
if scale_y is not None:
height *= scale_y
nrows = kwargs.pop('nrows', 1)
ncols = kwargs.pop('ncols', 1)
width = ncols * width
height = nrows * height
return plt.subplots(nrows=nrows, ncols=ncols, figsize=(width,height), **kwargs)
# ==================================================================================================
def plot(x, y, units='absolute', axis=None, **kwargs):
    r'''
    Plot.
    :arguments:
      **x, y** (``list``)
        Coordinates.
    :options:
      **units** ([``'absolute'``] | ``'relative'``)
        The type of units in which the coordinates are specified. Relative coordinates correspond to
        a fraction of the relevant axis. If you use relative coordinates, be sure to set the limits
        and scale before calling this function!
      ...
        Any ``plt.plot(...)`` option.
    :returns:
      The handle of the ``plt.plot(...)`` command.
    '''
    # default to the current axis
    axis = axis if axis is not None else plt.gca()
    # relative units: convert to absolute data-coordinates first
    if units.lower() == 'relative':
        x, y = rel2abs_x(x, axis), rel2abs_y(y, axis)
    return axis.plot(x, y, **kwargs)
# ==================================================================================================
def text(x, y, text, units='absolute', axis=None, **kwargs):
    r'''
    Plot a text.
    :arguments:
      **x, y** (``float``)
        Coordinates.
      **text** (``str``)
        Text to plot.
    :options:
      **units** ([``'absolute'``] | ``'relative'``)
        The type of units in which the coordinates are specified. Relative coordinates correspond to
        a fraction of the relevant axis. If you use relative coordinates, be sure to set the limits
        and scale before calling this function!
      ...
        Any ``plt.text(...)`` option.
    :returns:
      The handle of the ``plt.text(...)`` command.
    '''
    # default to the current axis
    axis = axis if axis is not None else plt.gca()
    # relative units: convert to absolute data-coordinates first
    if units.lower() == 'relative':
        x, y = rel2abs_x(x, axis), rel2abs_y(y, axis)
    return axis.text(x, y, text, **kwargs)
# ==================================================================================================
def diagonal_powerlaw(exp, ll=None, lr=None, tl=None, tr=None, width=None, height=None, plot=False, **kwargs):
    r'''
    Set the limits such that a power-law with a certain exponent lies on the diagonal.
    :arguments:
      **exp** (``<float>``)
        The power-law exponent.
      **ll, lr, tl, tr** (``<list>``)
        Coordinates of the lower-left, or the lower-right, or the top-left, or the top-right corner
        (specify exactly one of them).
      **width, height** (``<float>``)
        Width or the height (specify exactly one of them).
    :options:
      **axis** ([``plt.gca()``] | ...)
        Specify the axis to which to apply the limits.
      **plot** ([``False``] | ``True``)
        Plot the diagonal.
      ...
        Any ``plt.plot(...)`` option.
    :returns:
      The handle of the ``plt.plot(...)`` command (if any).
    '''
    axis = kwargs.pop('axis', plt.gca())
    # on a log-log plot a power-law is a straight line, so all geometry below is
    # done in log-space; exactly one of "width"/"height" must be given
    if width and not height: width = np.log(width)
    elif height and not width: height = np.log(height)
    else: raise IOError('Specify "width" or "height"')
    # exactly one corner must be given; convert it to log-space as well
    if ll and not lr and not tl and not tr: ll = [np.log(ll[0]), np.log(ll[1])]
    elif lr and not ll and not tl and not tr: lr = [np.log(lr[0]), np.log(lr[1])]
    elif tl and not lr and not ll and not tr: tl = [np.log(tl[0]), np.log(tl[1])]
    elif tr and not lr and not tl and not ll: tr = [np.log(tr[0]), np.log(tr[1])]
    else: raise IOError('Specify "ll" or "lr" or "tl" or "tr"')
    axis.set_xscale('log')
    axis.set_yscale('log')
    # diagonal condition: in log-space the height/width ratio equals |exp|
    if width: height = width * np.abs(exp)
    elif height: width = height / np.abs(exp)
    # set limits from the given corner; np.exp maps back from log-space, and
    # sorted() handles the signs of width/height
    if ll:
        axis.set_xlim(sorted([np.exp(ll[0]), np.exp(ll[0] + width)]))
        axis.set_ylim(sorted([np.exp(ll[1]), np.exp(ll[1] + height)]))
    elif lr:
        axis.set_xlim(sorted([np.exp(lr[0]), np.exp(lr[0] - width)]))
        axis.set_ylim(sorted([np.exp(lr[1]), np.exp(lr[1] + height)]))
    elif tl:
        axis.set_xlim(sorted([np.exp(tl[0]), np.exp(tl[0] + width)]))
        axis.set_ylim(sorted([np.exp(tl[1]), np.exp(tl[1] - height)]))
    elif tr:
        axis.set_xlim(sorted([np.exp(tr[0]), np.exp(tr[0] - width)]))
        axis.set_ylim(sorted([np.exp(tr[1]), np.exp(tr[1] - height)]))
    # optionally draw the diagonal itself (relative coordinates span the full axis)
    if plot:
        if exp > 0: return plot_powerlaw(exp, 0., 0., 1., **kwargs)
        else: return plot_powerlaw(exp, 0., 1., 1., **kwargs)
# ==================================================================================================
def annotate_powerlaw(text, exp, startx, starty, width=None, rx=0.5, ry=0.5, **kwargs):
    r'''
    Add a label to the middle of a power-law annotation (see ``goosempl.plot_powerlaw``).
    :arguments:
      **text** (``str``)
        The label text.
      **exp** (``float``)
        The power-law exponent.
      **startx, starty** (``float``)
        Start coordinates.
    :options:
      **width, height, endx, endy** (``float``)
        Definition of the end coordinate (only one of these options is needed).
      **rx, ry** (``float``)
        Shift in x- and y-direction w.r.t. the default coordinates.
      **units** ([``'relative'``] | ``'absolute'``)
        The type of units in which the coordinates are specified. Relative coordinates correspond to
        a fraction of the relevant axis. If you use relative coordinates, be sure to set the limits
        and scale before calling this function!
      **axis** ([``plt.gca()``] | ...)
        Specify the axis to which to apply the limits.
      ...
        Any ``plt.text(...)`` option.
    :returns:
      The handle of the ``plt.text(...)`` command.
    '''
    # get options/defaults
    endx = kwargs.pop('endx', None)
    endy = kwargs.pop('endy', None)
    height = kwargs.pop('height', None)
    units = kwargs.pop('units', 'relative')
    axis = kwargs.pop('axis', plt.gca())
    # check: a power-law is only a straight line on a log-log plot
    if axis.get_xscale() != 'log' or axis.get_yscale() != 'log':
        raise IOError('This function only works on a log-log scale, where the power-law is a straight line')
    # derive one end coordinate from "width" or "height"; the other coordinate is
    # recomputed below from the power-law itself
    if width is not None:
        endx = startx + width
        endy = None
    elif height is not None:
        if exp > 0: endy = starty + height
        elif exp == 0: endy = starty
        else: endy = starty - height
        endx = None
    # transform relative -> absolute axis coordinates (None entries pass through)
    if units.lower() == 'relative':
        [startx, endx] = rel2abs_x([startx, endx], axis)
        [starty, endy] = rel2abs_y([starty, endy], axis)
    # multiplication constant of the power-law y = const * x**exp through the start point
    const = starty / (startx ** exp)
    # complete the end point using the power-law
    if endx is not None: endy = const * endx ** exp
    else: endx = (endy / const) ** (1 / exp)
    # midpoint of the annotation in log-space, shifted by the fractions rx/ry
    x = 10. ** (np.log10(startx) + rx * (np.log10(endx) - np.log10(startx)))
    y = 10. ** (np.log10(starty) + ry * (np.log10(endy) - np.log10(starty)))
    # plot the label
    return axis.text(x, y, text, **kwargs)
# ==================================================================================================
def plot_powerlaw(exp, startx, starty, width=None, **kwargs):
    r'''
    Plot a power-law.
    :arguments:
      **exp** (``float``)
        The power-law exponent.
      **startx, starty** (``float``)
        Start coordinates.
    :options:
      **width, height, endx, endy** (``float``)
        Definition of the end coordinate (only one of these options is needed).
      **units** ([``'relative'``] | ``'absolute'``)
        The type of units in which the coordinates are specified. Relative coordinates correspond to
        a fraction of the relevant axis. If you use relative coordinates, be sure to set the limits
        and scale before calling this function!
      **axis** ([``plt.gca()``] | ...)
        Specify the axis to which to apply the limits.
      ...
        Any ``plt.plot(...)`` option.
    :returns:
      The handle of the ``plt.plot(...)`` command.
    '''
    # get options/defaults
    endx = kwargs.pop('endx', None)
    endy = kwargs.pop('endy', None)
    height = kwargs.pop('height', None)
    units = kwargs.pop('units', 'relative')
    axis = kwargs.pop('axis', plt.gca())
    # check: a power-law is only a straight line on a log-log plot
    if axis.get_xscale() != 'log' or axis.get_yscale() != 'log':
        raise IOError('This function only works on a log-log scale, where the power-law is a straight line')
    # derive one end coordinate from "width" or "height"; the other coordinate is
    # recomputed below from the power-law itself
    if width is not None:
        endx = startx + width
        endy = None
    elif height is not None:
        if exp > 0: endy = starty + height
        elif exp == 0: endy = starty
        else: endy = starty - height
        endx = None
    # transform relative -> absolute axis coordinates (None entries pass through)
    if units.lower() == 'relative':
        [startx, endx] = rel2abs_x([startx, endx], axis)
        [starty, endy] = rel2abs_y([starty, endy], axis)
    # multiplication constant of the power-law y = const * x**exp through the start point
    const = starty / (startx ** exp)
    # complete the end point using the power-law
    if endx is not None: endy = const * endx ** exp
    else: endx = (endy / const) ** (1 / exp)
    # plot the straight segment between start and end point
    return axis.plot([startx, endx], [starty, endy], **kwargs)
# ==================================================================================================
# ==================================================================================================
def histogram_bin_edges_minwidth(min_width, bins):
    r'''
    Merge bins with their right-neighbour until every bin is at least ``min_width`` wide.
    :arguments:
      **bins** (``<array_like>``)
        The bin-edges.
      **min_width** (``<float>``)
        The minimum bin width.
    '''
    # nothing requested: hand the edges back untouched
    if min_width is None or min_width is False:
        return bins
    # repeatedly merge the first too-narrow bin until none remain
    while True:
        narrow = np.where(np.diff(bins) < min_width)[0]
        if narrow.size == 0:
            return bins
        first = narrow[0]
        if first + 1 == len(bins) - 1:
            # the last bin is too narrow: absorb it into its left neighbour
            bins = np.hstack((bins[:first], bins[-1]))
        else:
            # drop the edge shared with the right neighbour
            bins = np.hstack((bins[:(first + 1)], bins[(first + 2):]))
# ==================================================================================================
def histogram_bin_edges_mincount(data, min_count, bins):
    r'''
    Merge bins with their right-neighbour until every bin holds at least ``min_count`` data-points.
    :arguments:
      **data** (``<array_like>``)
        Input data. The histogram is computed over the flattened array.
      **bins** (``<array_like>`` | ``<int>``)
        The bin-edges (or the number of bins, automatically converted to equal-sized bins).
      **min_count** (``<int>``)
        The minimum number of data-points per bin.
    '''
    # nothing requested: hand the edges back untouched
    if min_count is None or min_count is False:
        return bins
    # only an integer threshold makes sense
    if type(min_count) is not int:
        raise IOError('"min_count" must be an integer number')
    # repeatedly merge the first under-populated bin until none remain
    while True:
        counts, _ = np.histogram(data, bins=bins, density=False)
        sparse = np.where(counts < min_count)[0]
        if sparse.size == 0:
            return bins
        first = sparse[0]
        if first + 1 == len(counts):
            # the last bin is under-populated: absorb it into its left neighbour
            bins = np.hstack((bins[:first], bins[-1]))
        else:
            # drop the edge shared with the right neighbour
            bins = np.hstack((bins[:(first + 1)], bins[(first + 2):]))
# ==================================================================================================
def histogram_bin_edges(data, bins=10, mode='equal', min_count=None, integer=False, remove_empty_edges=True, min_width=None):
    r'''
    Determine bin-edges.
    :arguments:
      **data** (``<array_like>``)
        Input data. The histogram is computed over the flattened array.
    :options:
      **bins** ([``10``] | ``<int>``)
        The number of bins.
      **mode** ([``'equal'``] | ``<str>``)
        Mode with which to compute the bin-edges:
        * ``'equal'``: each bin has equal width.
        * ``'log'``: logarithmic spacing.
        * ``'uniform'``: uniform number of data-points per bin.
      **min_count** (``<int>``)
        The minimum number of data-points per bin.
      **min_width** (``<float>``)
        The minimum width of each bin.
      **integer** ([``False``] | ``True``)
        If ``True``, bins not encompassing an integer are removed
        (e.g. a bin with edges ``[1.1, 1.9]`` is removed, but ``[0.9, 1.1]`` is not removed).
      **remove_empty_edges** ([``True``] | ``False``)
        Remove empty bins at the beginning or the end.
    :returns:
      **bin_edges** (``<array of dtype float>``)
        The edges to pass into histogram.
    '''
    # determine the bin-edges
    if mode == 'equal':
        # equal-width bins spanning the data range
        bin_edges = np.linspace(np.min(data), np.max(data), bins + 1)
    elif mode == 'log':
        # logarithmically spaced bins (data must be strictly positive)
        bin_edges = np.logspace(np.log10(np.min(data)), np.log10(np.max(data)), bins + 1)
    elif mode == 'uniform':
        # - check
        if hasattr(bins, "__len__"):
            raise IOError('Only the number of bins can be specified')
        # - use the minimum count to estimate the number of bins
        if min_count is not None and min_count is not False:
            if type(min_count) != int:
                raise IOError('"min_count" must be an integer number')
            bins = int(np.floor(float(len(data)) / float(min_count)))
        # - number of data-points in each bin (equal for each)
        count = int(np.floor(float(len(data)) / float(bins))) * np.ones(bins, dtype='int')
        # - increase the number of data-points by one in as many bins as needed,
        #   such that the total fits the total number of data-points
        #   BUG FIX: the deprecated alias "np.int" was removed in NumPy >= 1.24;
        #   the builtin "int" is the documented equivalent
        count[np.linspace(0, bins - 1, len(data) - np.sum(count)).astype(int)] += 1
        # - split the data
        idx = np.empty((bins + 1), dtype='int')
        idx[0] = 0
        idx[1:] = np.cumsum(count)
        idx[-1] = len(data) - 1
        # - determine the bin-edges
        bin_edges = np.unique(np.sort(data)[idx])
    else:
        raise IOError('Unknown option')
    # remove empty starting and ending bin (related to an unfortunate choice of bin-edges)
    if remove_empty_edges:
        N, _ = np.histogram(data, bins=bin_edges, density=False)
        idx = np.min(np.where(N > 0)[0])
        jdx = np.max(np.where(N > 0)[0])
        bin_edges = bin_edges[(idx):(jdx + 2)]
    # merge bins with too few data-points (if needed)
    bin_edges = histogram_bin_edges_mincount(data, min_count=min_count, bins=bin_edges)
    # merge bins that have too small of a width
    bin_edges = histogram_bin_edges_minwidth(min_width=min_width, bins=bin_edges)
    # select only bins that encompass an integer (and retain the original bounds)
    if integer:
        idx = np.where(np.diff(np.floor(bin_edges)) >= 1)[0]
        idx = np.unique(np.hstack((0, idx, len(bin_edges) - 1)))
        bin_edges = bin_edges[idx]
    # return
    return bin_edges
# ==================================================================================================
def histogram(data, return_edges=True, **kwargs):
    r'''
    Compute histogram: a thin wrapper around ``numpy.histogram``.
    See `numpy.histogram <https://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html>`_
    :extra options:
      **return_edges** ([``True``] | [``False``])
        Return the bin edges if set to ``True``, return their midpoints otherwise.
    '''
    # delegate the actual binning to NumPy
    counts, edges = np.histogram(data, **kwargs)
    if return_edges:
        return counts, edges
    # midpoints: lower edge plus half the bin width
    midpoints = edges[:-1] + 0.5 * np.diff(edges)
    return counts, midpoints
# ==================================================================================================
def histogram_cumulative(data, **kwargs):
    r'''
    Compute cumulative histogram.
    See `numpy.histogram <https://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html>`_
    :extra options:
      **return_edges** ([``True``] | [``False``])
        Return the bin edges if set to ``True``, return their midpoints otherwise.
      **normalize** ([``False``] | ``True``)
        Normalize such that the final probability is one. In this case the function returns the
        (binned) cumulative probability density.
    '''
    # extract the local options before forwarding to NumPy
    return_edges = kwargs.pop('return_edges', True)
    normalize = kwargs.pop('normalize', False)
    counts, edges = np.histogram(data, **kwargs)
    # accumulate the counts left-to-right
    cumulative = np.cumsum(counts)
    if normalize:
        cumulative = cumulative / cumulative[-1]
    # optionally convert edges to bin midpoints
    if not return_edges:
        edges = edges[:-1] + 0.5 * np.diff(edges)
    return cumulative, edges
# ==================================================================================================
def hist(P, edges, **kwargs):
    r'''
    Plot a histogram as a collection of rectangular patches.
    :arguments:
      **P** (``<array_like>``)
        Count (or density) per bin.
      **edges** (``<array_like>``)
        Bin-edges; one entry more than ``P``.
    :options:
      **axis** ([``plt.gca()``] | ...)
        Axis to plot in.
      **cindex** (``<numpy.ndarray>``)
        Array with, for each patch, the value that should be indexed to a color.
      **autoscale** ([``True``] | ``False``)
        Manually rescale the axis limits to the data.
      ...
        Any ``matplotlib.collections.PatchCollection`` option.
    :returns:
      The handle of the ``PatchCollection``.
    '''
    from matplotlib.collections import PatchCollection
    from matplotlib.patches import Polygon
    # extract local options
    axis = kwargs.pop( 'axis' , plt.gca() )
    cindex = kwargs.pop( 'cindex' , None )
    autoscale = kwargs.pop( 'autoscale' , True )
    # set defaults
    kwargs.setdefault('edgecolor','k')
    # no color-index -> set transparent face
    if cindex is None:
        kwargs.setdefault('facecolor',(0.,0.,0.,0.))
    # convert -> list of Polygons: one axis-aligned rectangle per bin
    poly = []
    for p, xl, xu in zip(P, edges[:-1], edges[1:]):
        coor = np.array([
            [xl, 0.],
            [xu, 0.],
            [xu, p ],
            [xl, p ],
        ])
        poly.append(Polygon(coor))
    # NOTE: "(poly)" is NOT a tuple (no trailing comma) -- "args" is simply the
    # list of polygons, which PatchCollection accepts directly
    args = (poly)
    # convert patches -> matplotlib-objects
    p = PatchCollection(args,**kwargs)
    # add colors to patches
    if cindex is not None:
        p.set_array(cindex)
    # add patches to axis
    axis.add_collection(p)
    # rescale the axes manually (automatic limits of Collections are not supported)
    if autoscale:
        # - get limits
        xlim = [ edges[0], edges[-1] ]
        ylim = [ 0 , np.max(P) ]
        # - set limits +/- 10% extra margin
        axis.set_xlim([xlim[0]-.1*(xlim[1]-xlim[0]),xlim[1]+.1*(xlim[1]-xlim[0])])
        axis.set_ylim([ylim[0]-.1*(ylim[1]-ylim[0]),ylim[1]+.1*(ylim[1]-ylim[0])])
    return p
# ==================================================================================================
def cdf(data, mode='continuous', **kwargs):
    '''
    Return the empirical cumulative density of ``data``.
    :arguments:
      **data** (``<numpy.ndarray>``)
        Input data, to plot the distribution for.
    :returns:
      **P** (``<numpy.ndarray>``)
        Cumulative probability.
      **x** (``<numpy.ndarray>``)
        Data points.
    '''
    # NOTE: "mode" and any extra keyword arguments are accepted but currently unused
    probability = np.linspace(0.0, 1.0, len(data))
    ordered = np.sort(data)
    return (probability, ordered)
# ==================================================================================================
def patch(*args, **kwargs):
    '''
    Add patches to plot. The color of the patches is indexed according to a specified color-index.
    :example:
      Plot a finite element mesh: the outline of the undeformed configuration, and the deformed
      configuration for which the elements get a color e.g. based on stress::
        import matplotlib.pyplot as plt
        import goosempl as gplt
        fig,ax = plt.subplots()
        p = gplt.patch(coor=coor+disp,conn=conn,axis=ax,cindex=stress,cmap='YlOrRd',edgecolor=None)
        _ = gplt.patch(coor=coor ,conn=conn,axis=ax)
        cbar = fig.colorbar(p,axis=ax,aspect=10)
        plt.show()
    :arguments - option 1/2:
      **patches** (``<list>``)
        List with patch objects. Can be replaced by specifying ``coor`` and ``conn``.
    :arguments - option 2/2:
      **coor** (``<numpy.ndarray>`` | ``<list>`` (nested))
        Matrix with on each row the coordinates (positions) of each node.
      **conn** (``<numpy.ndarray>`` | ``<list>`` (nested))
        Matrix with on each row the node numbers (rows in ``coor``) which form an element (patch).
    :options:
      **cindex** (``<numpy.ndarray>``)
        Array with, for each patch, the value that should be indexed to a color.
      **axis** (``<matplotlib>``)
        Specify an axis to include to plot in. By default the current axis is used.
      **autoscale** ([``True``] | ``False``)
        Automatically update the limits of the plot (currently automatic limits of Collections are
        not supported by matplotlib).
    :recommended options:
      **cmap** (``<str>`` | ...)
        Specify a colormap.
      **linewidth** (``<float>``)
        Width of the edges.
      **edgecolor** (``<str>`` | ...)
        Color of the edges.
      **clim** (``(<float>,<float>)``)
        Lower and upper limit of the color-axis.
    :returns:
      **handle** (``<matplotlib>``)
        Handle of the patch objects.
    .. seealso::
      * `matplotlib example
        <http://matplotlib.org/examples/api/patch_collection.html>`_.
    '''
    from matplotlib.collections import PatchCollection
    from matplotlib.patches import Polygon
    # check dependent options: "coor" and "conn" must be given together
    if ('coor' not in kwargs or 'conn' not in kwargs):
        raise IOError('Specify both "coor" and "conn"')
    # extract local options
    axis = kwargs.pop('axis', plt.gca())
    cindex = kwargs.pop('cindex', None)
    coor = kwargs.pop('coor', None)
    conn = kwargs.pop('conn', None)
    autoscale = kwargs.pop('autoscale', True)
    # set defaults
    kwargs.setdefault('edgecolor', 'k')
    # no color-index -> set transparent face
    if cindex is None:
        kwargs.setdefault('facecolor', (0., 0., 0., 0.))
    # convert mesh -> list of Polygons
    if coor is not None and conn is not None:
        poly = [Polygon(coor[iconn, :]) for iconn in conn]
        # BUG FIX: was "tuple(poly, *args)", which raises TypeError whenever extra
        # positional arguments are given (tuple() accepts at most one argument);
        # prepend the generated polygons to any user-supplied patches instead
        args = tuple(poly) + args
    # convert patches -> matplotlib-objects
    p = PatchCollection(args, **kwargs)
    # add colors to patches
    if cindex is not None:
        p.set_array(cindex)
    # add patches to axis
    axis.add_collection(p)
    # rescale the axes manually
    if autoscale:
        # - get limits
        xlim = [np.min(coor[:, 0]), np.max(coor[:, 0])]
        ylim = [np.min(coor[:, 1]), np.max(coor[:, 1])]
        # - set limits +/- 10% extra margin
        axis.set_xlim([xlim[0] - .1 * (xlim[1] - xlim[0]), xlim[1] + .1 * (xlim[1] - xlim[0])])
        axis.set_ylim([ylim[0] - .1 * (ylim[1] - ylim[0]), ylim[1] + .1 * (ylim[1] - ylim[0])])
    return p
# ==================================================================================================
|
tdegeus/GooseMPL | GooseMPL/__init__.py | histogram_bin_edges_minwidth | python | def histogram_bin_edges_minwidth(min_width, bins):
r'''
Merge bins with right-neighbour until each bin has a minimum width.
:arguments:
**bins** (``<array_like>``)
The bin-edges.
**min_width** (``<float>``)
The minimum bin width.
'''
# escape
if min_width is None : return bins
if min_width is False: return bins
# keep removing where needed
while True:
idx = np.where(np.diff(bins) < min_width)[0]
if len(idx) == 0: return bins
idx = idx[0]
if idx+1 == len(bins)-1: bins = np.hstack(( bins[:(idx) ], bins[-1] ))
else : bins = np.hstack(( bins[:(idx+1)], bins[(idx+2):] )) | r'''
Merge bins with right-neighbour until each bin has a minimum width.
:arguments:
**bins** (``<array_like>``)
The bin-edges.
**min_width** (``<float>``)
The minimum bin width. | train | https://github.com/tdegeus/GooseMPL/blob/16e1e06cbcf7131ac98c03ca7251ce83734ef905/GooseMPL/__init__.py#L840-L867 | null | '''
This module provides some extensions to matplotlib.
:dependencies:
* numpy
* matplotlib
:copyright:
| Tom de Geus
| tom@geus.me
| http://www.geus.me
'''
# ==================================================================================================
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
import os,re,sys
# ==================================================================================================
def find_latex_font_serif():
    r'''
    Find an available font to mimic LaTeX, and return its name.
    '''
    import os, re
    import matplotlib.font_manager
    # strip directory, extension, and any " - style" suffix from a font file-name
    name = lambda font: os.path.splitext(os.path.split(font)[-1])[0].split(' - ')[0]
    fonts = matplotlib.font_manager.findSystemFonts(fontpaths=None, fontext='ttf')
    # candidate patterns, ordered from best LaTeX look-alike to generic fall-back
    matches = [
        r'.*Computer\ Modern\ Roman.*',
        r'.*CMU\ Serif.*',
        r'.*CMU.*',
        r'.*Times.*',
        r'.*DejaVu.*',
        r'.*Serif.*',
    ]
    # return the first installed font matching the highest-priority pattern
    for pattern in matches:
        hit = next((font for font in fonts if re.match(pattern, font)), None)
        if hit is not None:
            return name(hit)
    return None
# --------------------------------------------------------------------------------------------------
def copy_style():
    r'''
    Write all goose-styles to the relevant matplotlib configuration directory.
    '''
    import os
    import matplotlib
    # style definitions
    # -----------------
    styles = {}
    styles['goose.mplstyle'] = '''
figure.figsize : 8,6
font.weight : normal
font.size : 16
axes.labelsize : medium
axes.titlesize : medium
xtick.labelsize : small
ytick.labelsize : small
xtick.top : True
ytick.right : True
axes.facecolor : none
axes.prop_cycle : cycler('color',['k', 'r', 'g', 'b', 'y', 'c', 'm'])
legend.fontsize : medium
legend.fancybox : true
legend.columnspacing : 1.0
legend.handletextpad : 0.2
lines.linewidth : 2
image.cmap : afmhot
image.interpolation : nearest
image.origin : lower
savefig.facecolor : none
figure.autolayout : True
errorbar.capsize : 2
'''
    styles['goose-tick-in.mplstyle'] = '''
xtick.direction : in
ytick.direction : in
'''
    styles['goose-tick-lower.mplstyle'] = '''
xtick.top : False
ytick.right : False
axes.spines.top : False
axes.spines.right : False
'''
    # the LaTeX style: use a serif font that mimics Computer Modern when available
    if find_latex_font_serif() is not None:
        styles['goose-latex.mplstyle'] = r'''
font.family : serif
font.serif : {serif:s}
font.weight : bold
font.size : 18
text.usetex : true
text.latex.preamble : \usepackage{{amsmath}},\usepackage{{amsfonts}},\usepackage{{amssymb}},\usepackage{{bm}}
'''.format(serif=find_latex_font_serif())
    else:
        # BUG FIX: this fallback string is never passed through str.format(), so the
        # "{{...}}" escapes were written literally to the style file; plain braces
        # are required here
        styles['goose-latex.mplstyle'] = r'''
font.family : serif
font.weight : bold
font.size : 18
text.usetex : true
text.latex.preamble : \usepackage{amsmath},\usepackage{amsfonts},\usepackage{amssymb},\usepackage{bm}
'''
    # write style definitions
    # -----------------------
    # directory name where the styles are stored
    dirname = os.path.abspath(os.path.join(matplotlib.get_configdir(), 'stylelib'))
    # make directory if it does not yet exist
    if not os.path.isdir(dirname): os.makedirs(dirname)
    # write all styles
    for fname, style in styles.items():
        open(os.path.join(dirname, fname), 'w').write(style)
# ==================================================================================================
def set_decade_lims(axis=None, direction=None):
    r'''
    Set limits to the floor/ceil values in terms of decades.
    :options:
      **axis** ([``plt.gca()``] | ...)
        Specify the axis to which to apply the limits.
      **direction** ([``None``] | ``'x'`` | ``'y'``)
        Limit the application to a certain direction (default: both).
    '''
    # default to the current axis
    if axis is None:
        axis = plt.gca()
    # snap the x-limits outward to whole decades
    if direction in (None, 'x'):
        lower, upper = axis.get_xlim()
        axis.set_xlim([10. ** np.floor(np.log10(lower)), 10. ** np.ceil(np.log10(upper))])
    # snap the y-limits outward to whole decades
    if direction in (None, 'y'):
        lower, upper = axis.get_ylim()
        axis.set_ylim([10. ** np.floor(np.log10(lower)), 10. ** np.ceil(np.log10(upper))])
# ==================================================================================================
def scale_lim(lim, factor=1.05):
    r'''
    Scale limits to be 5% wider, to have a nice plot.
    :arguments:
      **lim** (``<list>`` | ``<str>``)
        The limits. May be a string "[...,...]", which is converted to a list.
        A list is modified in place and also returned.
    :options:
      **factor** ([``1.05``] | ``<float>``)
        Scale factor.
    :returns:
      The widened limits.
    '''
    # convert string "[...,...]" to a list
    # SECURITY NOTE(review): eval() executes arbitrary code -- only pass trusted strings
    if isinstance(lim, str):
        lim = eval(lim)
    # widen symmetrically: each side grows by half of the extra fraction
    D = lim[1] - lim[0]
    lim[0] -= (factor - 1.) / 2. * D
    lim[1] += (factor - 1.) / 2. * D
    return lim
# ==================================================================================================
def abs2rel_x(x, axis=None):
    r'''
    Transform absolute x-coordinates to relative x-coordinates. Relative coordinates correspond to a
    fraction of the relevant axis. Be sure to set the limits and scale before calling this function!
    :arguments:
      **x** (``float``, ``list``)
        Absolute coordinates. ``None`` entries in a list are passed through unchanged.
    :options:
      **axis** ([``plt.gca()``] | ...)
        Specify the axis from which to take the limits and scale.
    :returns:
      **x** (``float``, ``list``)
        Relative coordinates.
    '''
    # default to the current axis
    if axis is None:
        axis = plt.gca()
    # current x-limits
    xmin, xmax = axis.get_xlim()
    # log scale: compute the fraction in log10-space
    if axis.get_xscale() == 'log':
        try:
            return [(np.log10(i) - np.log10(xmin)) / (np.log10(xmax) - np.log10(xmin)) if i is not None else i for i in x]
        except:  # deliberately broad: fall back to the scalar form on any failure to iterate
            return (np.log10(x) - np.log10(xmin)) / (np.log10(xmax) - np.log10(xmin))
    # normal (linear) scale
    else:
        try:
            return [(i - xmin) / (xmax - xmin) if i is not None else i for i in x]
        except:  # deliberately broad: fall back to the scalar form on any failure to iterate
            return (x - xmin) / (xmax - xmin)
# ==================================================================================================
def abs2rel_y(y, axis=None):
    r'''
    Transform absolute y-coordinates to relative y-coordinates. Relative coordinates correspond to a
    fraction of the relevant axis. Be sure to set the limits and scale before calling this function!
    :arguments:
      **y** (``float``, ``list``)
        Absolute coordinates. ``None`` entries in a list are passed through unchanged.
    :options:
      **axis** ([``plt.gca()``] | ...)
        Specify the axis from which to take the limits and scale.
    :returns:
      **y** (``float``, ``list``)
        Relative coordinates.
    '''
    # default to the current axis
    if axis is None:
        axis = plt.gca()
    # current y-limits
    ymin, ymax = axis.get_ylim()
    # BUG FIX: the *y*-axis scale must be inspected (was "get_xscale()")
    if axis.get_yscale() == 'log':
        # log scale: compute the fraction in log10-space
        try:
            return [(np.log10(i) - np.log10(ymin)) / (np.log10(ymax) - np.log10(ymin)) if i is not None else i for i in y]
        except TypeError:
            # scalar input: the list-comprehension fails to iterate
            return (np.log10(y) - np.log10(ymin)) / (np.log10(ymax) - np.log10(ymin))
    # normal (linear) scale
    else:
        try:
            return [(i - ymin) / (ymax - ymin) if i is not None else i for i in y]
        except TypeError:
            return (y - ymin) / (ymax - ymin)
# ==================================================================================================
def rel2abs_x(x, axis=None):
    r'''
    Transform relative x-coordinates to absolute x-coordinates. Relative coordinates correspond to a
    fraction of the relevant axis. Be sure to set the limits and scale before calling this function!
    :arguments:
      **x** (``float``, ``list``)
        Relative coordinates. ``None`` entries in a list are passed through unchanged.
    :options:
      **axis** ([``plt.gca()``] | ...)
        Specify the axis from which to take the limits and scale.
    :returns:
      **x** (``float``, ``list``)
        Absolute coordinates.
    '''
    # default to the current axis
    if axis is None:
        axis = plt.gca()
    # current x-limits
    xmin, xmax = axis.get_xlim()
    # log scale: interpolate the fraction in log10-space
    if axis.get_xscale() == 'log':
        try:
            return [10. ** (np.log10(xmin) + i * (np.log10(xmax) - np.log10(xmin))) if i is not None else i for i in x]
        except:  # deliberately broad: fall back to the scalar form on any failure to iterate
            return 10. ** (np.log10(xmin) + x * (np.log10(xmax) - np.log10(xmin)))
    # normal (linear) scale
    else:
        try:
            return [xmin + i * (xmax - xmin) if i is not None else i for i in x]
        except:  # deliberately broad: fall back to the scalar form on any failure to iterate
            return xmin + x * (xmax - xmin)
# ==================================================================================================
def rel2abs_y(y, axis=None):
    r'''
    Transform relative y-coordinates to absolute y-coordinates. Relative coordinates correspond to a
    fraction of the relevant axis. Be sure to set the limits and scale before calling this function!
    :arguments:
      **y** (``float``, ``list``)
        Relative coordinates. ``None`` entries in a list are passed through unchanged.
    :options:
      **axis** ([``plt.gca()``] | ...)
        Specify the axis from which to take the limits and scale.
    :returns:
      **y** (``float``, ``list``)
        Absolute coordinates.
    '''
    # default to the current axis
    if axis is None:
        axis = plt.gca()
    # current y-limits
    ymin, ymax = axis.get_ylim()
    # BUG FIX: the *y*-axis scale must be inspected (was "get_xscale()")
    if axis.get_yscale() == 'log':
        # log scale: interpolate the fraction in log10-space
        try:
            return [10. ** (np.log10(ymin) + i * (np.log10(ymax) - np.log10(ymin))) if i is not None else i for i in y]
        except TypeError:
            # scalar input: the list-comprehension fails to iterate
            return 10. ** (np.log10(ymin) + y * (np.log10(ymax) - np.log10(ymin)))
    # normal (linear) scale
    else:
        try:
            return [ymin + i * (ymax - ymin) if i is not None else i for i in y]
        except TypeError:
            return ymin + y * (ymax - ymin)
# ==================================================================================================
def subplots(scale_x=None, scale_y=None, scale=None, **kwargs):
    r'''
    Run ``matplotlib.pyplot.subplots`` with ``figsize`` set to the correct multiple of the default.
    :additional options:
      **scale, scale_x, scale_y** (``<float>``)
        Scale the figure-size (along one of the dimensions).
    '''
    # an explicit "figsize" wins: defer entirely to matplotlib
    if 'figsize' in kwargs:
        return plt.subplots(**kwargs)
    # start from the default figure-size
    width, height = mpl.rcParams['figure.figsize']
    # apply the requested scaling
    if scale is not None:
        width, height = width * scale, height * scale
    if scale_x is not None:
        width = width * scale_x
    if scale_y is not None:
        height = height * scale_y
    # grow the figure with the number of sub-plots
    nrows = kwargs.pop('nrows', 1)
    ncols = kwargs.pop('ncols', 1)
    return plt.subplots(nrows=nrows, ncols=ncols, figsize=(ncols * width, nrows * height), **kwargs)
# ==================================================================================================
def plot(x, y, units='absolute', axis=None, **kwargs):
    r'''
    Plot.
    :arguments:
      **x, y** (``list``)
        Coordinates.
    :options:
      **units** ([``'absolute'``] | ``'relative'``)
        The type of units in which the coordinates are specified. Relative coordinates correspond to
        a fraction of the relevant axis. If you use relative coordinates, be sure to set the limits
        and scale before calling this function!
      ...
        Any ``plt.plot(...)`` option.
    :returns:
      The handle of the ``plt.plot(...)`` command.
    '''
    # default to the current axis
    axis = axis if axis is not None else plt.gca()
    # relative units: convert to absolute data-coordinates first
    if units.lower() == 'relative':
        x, y = rel2abs_x(x, axis), rel2abs_y(y, axis)
    return axis.plot(x, y, **kwargs)
# ==================================================================================================
def text(x, y, text, units='absolute', axis=None, **kwargs):
    r'''
    Plot a text.
    :arguments:
      **x, y** (``float``)
        Coordinates.
      **text** (``str``)
        Text to plot.
    :options:
      **units** ([``'absolute'``] | ``'relative'``)
        The type of units in which the coordinates are specified. Relative coordinates correspond to
        a fraction of the relevant axis. If you use relative coordinates, be sure to set the limits
        and scale before calling this function!
      ...
        Any ``plt.text(...)`` option.
    :returns:
      The handle of the ``plt.text(...)`` command.
    '''
    # default to the current axis
    axis = axis if axis is not None else plt.gca()
    # relative units: convert to absolute data-coordinates first
    if units.lower() == 'relative':
        x, y = rel2abs_x(x, axis), rel2abs_y(y, axis)
    return axis.text(x, y, text, **kwargs)
# ==================================================================================================
def diagonal_powerlaw(exp, ll=None, lr=None, tl=None, tr=None, width=None, height=None, plot=False, **kwargs):
    r'''
    Set the limits such that a power-law with a certain exponent lies on the diagonal.
    :arguments:
      **exp** (``<float>``)
        The power-law exponent.
      **ll, lr, tl, tr** (``<list>``)
        Coordinates of the lower-left, or the lower-right, or the top-left, or the top-right corner
        (specify exactly one of them).
      **width, height** (``<float>``)
        Width or the height (specify exactly one of them).
    :options:
      **axis** ([``plt.gca()``] | ...)
        Specify the axis to which to apply the limits.
      **plot** ([``False``] | ``True``)
        Plot the diagonal.
      ...
        Any ``plt.plot(...)`` option.
    :returns:
      The handle of the ``plt.plot(...)`` command (if any).
    '''
    axis = kwargs.pop('axis', plt.gca())
    # on a log-log plot a power-law is a straight line, so all geometry below is
    # done in log-space; exactly one of "width"/"height" must be given
    if width and not height: width = np.log(width)
    elif height and not width: height = np.log(height)
    else: raise IOError('Specify "width" or "height"')
    # exactly one corner must be given; convert it to log-space as well
    if ll and not lr and not tl and not tr: ll = [np.log(ll[0]), np.log(ll[1])]
    elif lr and not ll and not tl and not tr: lr = [np.log(lr[0]), np.log(lr[1])]
    elif tl and not lr and not ll and not tr: tl = [np.log(tl[0]), np.log(tl[1])]
    elif tr and not lr and not tl and not ll: tr = [np.log(tr[0]), np.log(tr[1])]
    else: raise IOError('Specify "ll" or "lr" or "tl" or "tr"')
    axis.set_xscale('log')
    axis.set_yscale('log')
    # diagonal condition: in log-space the height/width ratio equals |exp|
    if width: height = width * np.abs(exp)
    elif height: width = height / np.abs(exp)
    # set limits from the given corner; np.exp maps back from log-space, and
    # sorted() handles the signs of width/height
    if ll:
        axis.set_xlim(sorted([np.exp(ll[0]), np.exp(ll[0] + width)]))
        axis.set_ylim(sorted([np.exp(ll[1]), np.exp(ll[1] + height)]))
    elif lr:
        axis.set_xlim(sorted([np.exp(lr[0]), np.exp(lr[0] - width)]))
        axis.set_ylim(sorted([np.exp(lr[1]), np.exp(lr[1] + height)]))
    elif tl:
        axis.set_xlim(sorted([np.exp(tl[0]), np.exp(tl[0] + width)]))
        axis.set_ylim(sorted([np.exp(tl[1]), np.exp(tl[1] - height)]))
    elif tr:
        axis.set_xlim(sorted([np.exp(tr[0]), np.exp(tr[0] - width)]))
        axis.set_ylim(sorted([np.exp(tr[1]), np.exp(tr[1] - height)]))
    # optionally draw the diagonal itself (relative coordinates span the full axis)
    if plot:
        if exp > 0: return plot_powerlaw(exp, 0., 0., 1., **kwargs)
        else: return plot_powerlaw(exp, 0., 1., 1., **kwargs)
# ==================================================================================================
def annotate_powerlaw(text, exp, startx, starty, width=None, rx=0.5, ry=0.5, **kwargs):
    r'''
    Add a label in the middle of a power-law annotation (see ``goosempl.plot_powerlaw``).

    :arguments:

      **text** (``str``)
        The label text.

      **exp** (``float``)
        The power-law exponent.

      **startx, starty** (``float``)
        Start coordinates.

    :options:

      **width, height, endx, endy** (``float``)
        Definition of the end coordinate (only one of these options is needed).

      **rx, ry** (``float``)
        Fractional position of the label between start and end (in log-space).

      **units** ([``'relative'``] | ``'absolute'``)
        The type of units in which the coordinates are specified. Relative coordinates correspond
        to a fraction of the relevant axis. If you use relative coordinates, be sure to set the
        limits and scale before calling this function!

      **axis** ([``plt.gca()``] | ...)
        Specify the axis to which to apply the limits.

      ...
        Any ``plt.text(...)`` option.

    :returns:

      The handle of the ``plt.text(...)`` command.
    '''
    # pop local options; everything that remains is forwarded to plt.text
    endx = kwargs.pop('endx', None)
    endy = kwargs.pop('endy', None)
    height = kwargs.pop('height', None)
    units = kwargs.pop('units', 'relative')
    axis = kwargs.pop('axis', plt.gca())

    # a power-law is a straight line only on a double-logarithmic scale
    if axis.get_xscale() != 'log' or axis.get_yscale() != 'log':
        raise IOError('This function only works on a log-log scale, where the power-law is a straight line')

    # a width/height specification overrides any given end coordinate
    if width is not None:
        endx = startx + width
        endy = None
    elif height is not None:
        if exp > 0:
            endy = starty + height
        elif exp == 0:
            endy = starty
        else:
            endy = starty - height
        endx = None

    # convert relative (axis-fraction) coordinates to data coordinates
    if units.lower() == 'relative':
        [startx, endx] = rel2abs_x([startx, endx], axis)
        [starty, endy] = rel2abs_y([starty, endy], axis)

    # prefactor of the power-law passing through the start point
    prefactor = starty / (startx ** exp)

    # complete the missing end coordinate from the power-law
    if endx is None:
        endx = (endy / prefactor) ** (1. / exp)
    else:
        endy = prefactor * endx ** exp

    # label position: interpolate logarithmically between start and end
    xm = 10. ** (np.log10(startx) + rx * (np.log10(endx) - np.log10(startx)))
    ym = 10. ** (np.log10(starty) + ry * (np.log10(endy) - np.log10(starty)))

    return axis.text(xm, ym, text, **kwargs)
# ==================================================================================================
def plot_powerlaw(exp, startx, starty, width=None, **kwargs):
    r'''
    Plot a power-law as a straight line on the current (log-log) axis.

    :arguments:

      **exp** (``float``)
        The power-law exponent.

      **startx, starty** (``float``)
        Start coordinates.

    :options:

      **width, height, endx, endy** (``float``)
        Definition of the end coordinate (only one of these options is needed).

      **units** ([``'relative'``] | ``'absolute'``)
        The type of units in which the coordinates are specified. Relative coordinates correspond
        to a fraction of the relevant axis. If you use relative coordinates, be sure to set the
        limits and scale before calling this function!

      **axis** ([``plt.gca()``] | ...)
        Specify the axis to which to apply the limits.

      ...
        Any ``plt.plot(...)`` option.

    :returns:

      The handle of the ``plt.plot(...)`` command.
    '''
    # pop local options; everything that remains is forwarded to plt.plot
    endx = kwargs.pop('endx', None)
    endy = kwargs.pop('endy', None)
    height = kwargs.pop('height', None)
    units = kwargs.pop('units', 'relative')
    axis = kwargs.pop('axis', plt.gca())

    # a power-law is a straight line only on a double-logarithmic scale
    if axis.get_xscale() != 'log' or axis.get_yscale() != 'log':
        raise IOError('This function only works on a log-log scale, where the power-law is a straight line')

    # a width/height specification overrides any given end coordinate
    if width is not None:
        endx = startx + width
        endy = None
    elif height is not None:
        if exp > 0:
            endy = starty + height
        elif exp == 0:
            endy = starty
        else:
            endy = starty - height
        endx = None

    # convert relative (axis-fraction) coordinates to data coordinates
    if units.lower() == 'relative':
        [startx, endx] = rel2abs_x([startx, endx], axis)
        [starty, endy] = rel2abs_y([starty, endy], axis)

    # prefactor of the power-law passing through the start point
    prefactor = starty / (startx ** exp)

    # complete the missing end coordinate from the power-law
    if endx is None:
        endx = (endy / prefactor) ** (1. / exp)
    else:
        endy = prefactor * endx ** exp

    return axis.plot([startx, endx], [starty, endy], **kwargs)
# ==================================================================================================
def grid_powerlaw(exp, insert=0, skip=0, end=-1, step=0, axis=None, **kwargs):
    r'''
    Draw a power-law grid: a grid that respects a certain power-law exponent. The grid-lines start
    from the positions of the ticks.

    :arguments:

      **exp** (``float``)
        The power-law exponent.

    :options:

      **insert** (``<int>``)
        Insert extra lines in between the default lines set by the tick positions.

      **skip, end, step** (``<int>``)
        Select from the lines based on ``coor = coor[skip:end:step]``.

      **axis** ([``plt.gca()``] | ...)
        Specify the axis to which to apply the limits.

      ...
        Any ``plt.plot(...)`` option.

    :returns:

      The handle of the ``plt.plot(...)`` command.
    '''
    # default axis
    if axis is None: axis = plt.gca()
    # default plot settings
    kwargs.setdefault('color' , 'k' )
    kwargs.setdefault('linestyle', '--')
    kwargs.setdefault('linewidth', 1 )
    # check: grid-lines are straight only on a double-logarithmic scale
    if axis.get_xscale() != 'log' or axis.get_yscale() != 'log':
        raise IOError('This function only works on a log-log scale, where the power-law is a straight line')
    # zero-exponent: draw horizontal lines
    if exp == 0:
        # y-coordinate of the start positions (as fraction of the axis)
        starty = abs2rel_y(axis.get_yticks(), axis=axis)
        # insert extra coordinates (linear interpolation between the tick positions)
        if insert > 0:
            n = len(starty)
            x = np.linspace(0,1,n+(n-1)*int(insert))
            xp = np.linspace(0,1,n)
            starty = np.interp(x, xp, starty)
        # skip coordinates
        starty = starty[int(skip):int(end):int(1+step)]
        # set remaining coordinates: horizontal lines spanning the full axis width
        endy = starty
        startx = np.zeros((len(starty)))
        endx = np.ones ((len(starty)))
    # all other exponents
    else:
        # get the axis' size in real coordinates
        # - get the limits
        xmin, xmax = axis.get_xlim()
        ymin, ymax = axis.get_ylim()
        # - compute the size in both directions (in decades)
        deltax = np.log10(xmax) - np.log10(xmin)
        deltay = np.log10(ymax) - np.log10(ymin)
        # convert the exponent in real coordinates to an exponent in relative coordinates
        b = np.abs(exp) * deltax / deltay
        # x-coordinate of the start positions (as fraction of the axis)
        startx = abs2rel_x(axis.get_xticks(), axis=axis)
        # compute how many lines need to be prepended (lines starting left of the axis
        # can still cross the visible window)
        Dx = startx[1] - startx[0]
        nneg = int(np.floor(1./(b*Dx))) - 1
        # add extra to be sure
        if insert > 0:
            nneg += 1
        # prepend
        if nneg > 0:
            startx = np.hstack(( startx[0]+np.cumsum(-Dx * np.ones((nneg)))[::-1], startx ))
        # insert extra coordinates
        if insert > 0:
            n = len(startx)
            x = np.linspace(0,1,n+(n-1)*int(insert))
            xp = np.linspace(0,1,n)
            startx = np.interp(x, xp, startx)
        # skip coordinates
        if step > 0:
            startx = startx[int(skip)::int(1+step)]
        # x-coordinate of the end of the lines (slope 1/b in relative coordinates)
        endx = startx + 1/b
        # y-coordinate of the start and the end of the lines (bottom-to-top for a positive
        # exponent, top-to-bottom for a negative one)
        if exp > 0:
            starty = np.zeros((len(startx)))
            endy = np.ones ((len(startx)))
        else:
            starty = np.ones ((len(startx)))
            endy = np.zeros((len(startx)))
    # convert to real coordinates
    startx = rel2abs_x(startx, axis)
    endx = rel2abs_x(endx , axis)
    starty = rel2abs_y(starty, axis)
    endy = rel2abs_y(endy , axis)
    # plot
    lines = axis.plot(np.vstack(( startx, endx )), np.vstack(( starty, endy )), **kwargs)
    # remove excess legend entries: keep only the first line's label
    plt.setp(lines[1:], label="_")
    # return handles
    return lines
# ==================================================================================================
# ==================================================================================================
def histogram_bin_edges_mincount(data, min_count, bins):
    r'''
    Merge each bin that holds fewer than ``min_count`` data-points with its right neighbour
    (the last bin is merged with its left neighbour), until every bin is sufficiently filled.

    :arguments:

      **data** (``<array_like>``)
        Input data. The histogram is computed over the flattened array.

      **bins** (``<array_like>`` | ``<int>``)
        The bin-edges (or the number of bins, automatically converted to equal-sized bins).

      **min_count** (``<int>``)
        The minimum number of data-points per bin.
    '''
    # nothing requested: leave the bin-edges untouched
    if min_count is None or min_count is False:
        return bins

    if type(min_count) != int:
        raise IOError('"min_count" must be an integer number')

    # repeatedly remove the inner edge of the first under-filled bin
    while True:
        count, _ = np.histogram(data, bins=bins, density=False)
        low = np.where(count < min_count)[0]
        if len(low) == 0:
            return bins
        i = low[0]
        if i + 1 == len(count):
            # last bin: merge with the bin to its left
            bins = np.hstack((bins[:i], bins[-1]))
        else:
            # merge with the bin to its right
            bins = np.hstack((bins[:i + 1], bins[i + 2:]))
# ==================================================================================================
def histogram_bin_edges(data, bins=10, mode='equal', min_count=None, integer=False, remove_empty_edges=True, min_width=None):
    r'''
    Determine bin-edges.

    :arguments:

      **data** (``<array_like>``)
        Input data. The histogram is computed over the flattened array.

    :options:

      **bins** ([``10``] | ``<int>``)
        The number of bins.

      **mode** ([``'equal'``] | ``<str>``)
        Mode with which to compute the bin-edges:
        * ``'equal'``: each bin has equal width.
        * ``'log'``: logarithmic spacing.
        * ``'uniform'``: uniform number of data-points per bin.

      **min_count** (``<int>``)
        The minimum number of data-points per bin.

      **min_width** (``<float>``)
        The minimum width of each bin.

      **integer** ([``False``] | ``True``)
        If ``True``, bins not encompassing an integer are removed
        (e.g. a bin with edges ``[1.1, 1.9]`` is removed, but ``[0.9, 1.1]`` is not removed).

      **remove_empty_edges** ([``True``] | ``False``)
        Remove empty bins at the beginning or the end.

    :returns:

      **bin_edges** (``<array of dtype float>``)
        The edges to pass into histogram.
    '''
    # determine the bin-edges
    if mode == 'equal':
        # equal-width bins spanning the data range
        bin_edges = np.linspace(np.min(data), np.max(data), bins+1)
    elif mode == 'log':
        # logarithmically spaced bins (the data must be strictly positive)
        bin_edges = np.logspace(np.log10(np.min(data)), np.log10(np.max(data)), bins+1)
    elif mode == 'uniform':
        # - check
        if hasattr(bins, "__len__"):
            raise IOError('Only the number of bins can be specified')
        # - use the minimum count to estimate the number of bins
        if min_count is not None and min_count is not False:
            if type(min_count) != int:
                raise IOError('"min_count" must be an integer number')
            bins = int(np.floor(float(len(data))/float(min_count)))
        # - number of data-points in each bin (equal for each)
        count = int(np.floor(float(len(data))/float(bins))) * np.ones(bins, dtype='int')
        # - distribute the remainder: add one data-point to as many bins as needed,
        #   such that the total fits the total number of data-points
        #   (bug fix: "np.int" was removed from NumPy >= 1.24, use the built-in "int")
        count[np.linspace(0, bins-1, len(data)-np.sum(count)).astype(int)] += 1
        # - split the data: cumulative counts give the indices of the edges in the sorted data
        idx = np.empty((bins+1), dtype='int')
        idx[0] = 0
        idx[1:] = np.cumsum(count)
        idx[-1] = len(data) - 1
        # - determine the bin-edges (duplicates removed)
        bin_edges = np.unique(np.sort(data)[idx])
    else:
        raise IOError('Unknown option')
    # remove empty starting and ending bin (related to an unfortunate choice of bin-edges)
    if remove_empty_edges:
        N, _ = np.histogram(data, bins=bin_edges, density=False)
        idx = np.min(np.where(N>0)[0])
        jdx = np.max(np.where(N>0)[0])
        bin_edges = bin_edges[(idx):(jdx+2)]
    # merge bins with too few data-points (if needed)
    bin_edges = histogram_bin_edges_mincount(data, min_count=min_count, bins=bin_edges)
    # merge bins that have too small of a width
    bin_edges = histogram_bin_edges_minwidth(min_width=min_width, bins=bin_edges)
    # select only bins that encompass an integer (and retain the original bounds)
    if integer:
        idx = np.where(np.diff(np.floor(bin_edges))>=1)[0]
        idx = np.unique(np.hstack((0, idx, len(bin_edges)-1)))
        bin_edges = bin_edges[idx]
    # return
    return bin_edges
# ==================================================================================================
def histogram(data, return_edges=True, **kwargs):
    r'''
    Compute a histogram: a thin wrapper around ``numpy.histogram``.

    See `numpy.histrogram <https://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html>`_

    :extra options:

      **return_edges** ([``True``] | [``False``])
        Return the bin edges if set to ``True``, return their midpoints otherwise.
    '''
    # delegate to NumPy
    count, edges = np.histogram(data, **kwargs)

    if return_edges:
        return count, edges

    # convert the bin-edges to the mid-point of each bin
    midpoints = np.diff(edges) / 2. + edges[:-1]

    return count, midpoints
# ==================================================================================================
def histogram_cumulative(data, **kwargs):
    r'''
    Compute a cumulative histogram.

    See `numpy.histrogram <https://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html>`_

    :extra options:

      **return_edges** ([``True``] | [``False``])
        Return the bin edges if set to ``True``, return their midpoints otherwise.

      **normalize** ([``False``] | ``True``)
        Normalize such that the final probability is one. In this case the function returns the
        (binned) cumulative probability density.
    '''
    # pop local options; the rest is forwarded to numpy.histogram
    return_edges = kwargs.pop('return_edges', True)
    normalize = kwargs.pop('normalize', False)

    count, edges = np.histogram(data, **kwargs)

    # accumulate the per-bin counts
    cumulative = np.cumsum(count)

    if normalize:
        cumulative = cumulative / cumulative[-1]

    if not return_edges:
        # convert the bin-edges to the mid-point of each bin
        edges = np.diff(edges) / 2. + edges[:-1]

    return cumulative, edges
# ==================================================================================================
def hist(P, edges, **kwargs):
    r'''
    Plot a histogram: one rectangular patch per bin.

    :arguments:

      **P** (``<array_like>``)
        Count (or density) per bin.

      **edges** (``<array_like>``)
        The bin-edges.

    :options:

      **cindex** (``<numpy.ndarray>``)
        Array with, for each patch, the value that should be indexed to a color.

      **axis** ([``plt.gca()``] | ...)
        Specify the axis to plot in.

      **autoscale** ([``True``] | ``False``)
        Automatically update the limits of the plot.

    :returns:

      The handle of the ``PatchCollection``.
    '''
    from matplotlib.collections import PatchCollection
    from matplotlib.patches import Polygon

    # extract local options
    axis = kwargs.pop('axis', plt.gca())
    cindex = kwargs.pop('cindex', None)
    autoscale = kwargs.pop('autoscale', True)

    # set defaults
    kwargs.setdefault('edgecolor', 'k')

    # transparent faces unless a color-index is supplied
    if cindex is None:
        kwargs.setdefault('facecolor', (0., 0., 0., 0.))

    # one rectangle per bin: (lower-left, lower-right, upper-right, upper-left)
    polygons = [
        Polygon(np.array([[xl, 0.], [xu, 0.], [xu, p], [xl, p]]))
        for p, xl, xu in zip(P, edges[:-1], edges[1:])
    ]

    # convert patches -> matplotlib-objects
    collection = PatchCollection(polygons, **kwargs)

    # add colors to patches
    if cindex is not None:
        collection.set_array(cindex)

    # add patches to axis
    axis.add_collection(collection)

    # rescale the axes manually (matplotlib does not autoscale collections)
    if autoscale:
        xlim = [edges[0], edges[-1]]
        ylim = [0, np.max(P)]
        # set limits +/- 10% extra margin
        dx = .1 * (xlim[1] - xlim[0])
        dy = .1 * (ylim[1] - ylim[0])
        axis.set_xlim([xlim[0] - dx, xlim[1] + dx])
        axis.set_ylim([ylim[0] - dy, ylim[1] + dy])

    return collection
# ==================================================================================================
def cdf(data, mode='continuous', **kwargs):
    '''
    Return the empirical cumulative density of the input data.

    :arguments:

      **data** (``<numpy.ndarray>``)
        Input data, to plot the distribution for.

    :returns:

      **P** (``<numpy.ndarray>``)
        Cumulative probability.

      **x** (``<numpy.ndarray>``)
        Data points (sorted ascending).
    '''
    # NOTE: "mode" and "kwargs" are accepted for interface compatibility but unused
    x = np.sort(data)
    P = np.linspace(0.0, 1.0, len(x))
    return (P, x)
# ==================================================================================================
def patch(*args, **kwargs):
    '''
    Add patches to plot. The color of the patches is indexed according to a specified color-index.

    :example:

      Plot a finite element mesh: the outline of the undeformed configuration, and the deformed
      configuration for which the elements get a color e.g. based on stress::

        import matplotlib.pyplot as plt
        import goosempl as gplt

        fig,ax = plt.subplots()

        p = gplt.patch(coor=coor+disp,conn=conn,axis=ax,cindex=stress,cmap='YlOrRd',edgecolor=None)
        _ = gplt.patch(coor=coor     ,conn=conn,axis=ax)

        cbar = fig.colorbar(p,axis=ax,aspect=10)

        plt.show()

    :arguments - option 1/2:

      **patches** (``<list>``)
        List with patch objects. Can be replaced by specifying ``coor`` and ``conn``.

    :arguments - option 2/2:

      **coor** (``<numpy.ndarray>`` | ``<list>`` (nested))
        Matrix with on each row the coordinates (positions) of each node.

      **conn** (``<numpy.ndarray>`` | ``<list>`` (nested))
        Matrix with on each row the number numbers (rows in ``coor``) which form an element (patch).

    :options:

      **cindex** (``<numpy.ndarray>``)
        Array with, for each patch, the value that should be indexed to a color.

      **axis** (``<matplotlib>``)
        Specify an axis to include to plot in. By default the current axis is used.

      **autoscale** ([``True``] | ``False``)
        Automatically update the limits of the plot (only available when ``coor`` is specified;
        currently automatic limits of Collections are not supported by matplotlib).

    :recommended options:

      **cmap** (``<str>`` | ...)
        Specify a colormap.

      **linewidth** (``<float>``)
        Width of the edges.

      **edgecolor** (``<str>`` | ...)
        Color of the edges.

      **clim** (``(<float>,<float>)``)
        Lower and upper limit of the color-axis.

    :returns:

      **handle** (``<matplotlib>``)
        Handle of the patch objects.

    .. seealso::

      * `matplotlib example
        <http://matplotlib.org/examples/api/patch_collection.html>`_.
    '''
    from matplotlib.collections import PatchCollection
    from matplotlib.patches import Polygon

    # extract local options
    axis = kwargs.pop('axis', plt.gca())
    cindex = kwargs.pop('cindex', None)
    coor = kwargs.pop('coor', None)
    conn = kwargs.pop('conn', None)
    autoscale = kwargs.pop('autoscale', True)

    # check input: either a list of patches (positional), or a mesh ("coor" and "conn")
    # (bug fix: the old check demanded "coor"/"conn" even when patches were given,
    # making the documented "option 1/2" unreachable)
    if len(args) == 0 and (coor is None or conn is None):
        raise IOError('Specify both "coor" and "conn"')

    # set defaults
    kwargs.setdefault('edgecolor', 'k')

    # no color-index -> set transparent
    if cindex is None:
        kwargs.setdefault('facecolor', (0., 0., 0., 0.))

    # convert mesh -> list of Polygons, prepended to any positional arguments
    # (bug fix: "tuple(poly, *args)" raised "TypeError: tuple expected at most 1 argument")
    if coor is not None and conn is not None:
        poly = [Polygon(coor[iconn, :]) for iconn in conn]
        args = (poly, *args)

    # convert patches -> matplotlib-objects
    p = PatchCollection(*args, **kwargs)

    # add colors to patches
    if cindex is not None:
        p.set_array(cindex)

    # add patches to axis
    axis.add_collection(p)

    # rescale the axes manually (only possible when the nodal coordinates are known)
    if autoscale and coor is not None:
        xlim = [np.min(coor[:, 0]), np.max(coor[:, 0])]
        ylim = [np.min(coor[:, 1]), np.max(coor[:, 1])]
        # set limits +/- 10% extra margin
        dx = .1 * (xlim[1] - xlim[0])
        dy = .1 * (ylim[1] - ylim[0])
        axis.set_xlim([xlim[0] - dx, xlim[1] + dx])
        axis.set_ylim([ylim[0] - dy, ylim[1] + dy])

    return p
# ==================================================================================================
|
tdegeus/GooseMPL | GooseMPL/__init__.py | histogram_bin_edges_mincount | python | def histogram_bin_edges_mincount(data, min_count, bins):
r'''
Merge bins with right-neighbour until each bin has a minimum number of data-points.
:arguments:
**data** (``<array_like>``)
Input data. The histogram is computed over the flattened array.
**bins** (``<array_like>`` | ``<int>``)
The bin-edges (or the number of bins, automatically converted to equal-sized bins).
**min_count** (``<int>``)
The minimum number of data-points per bin.
'''
# escape
if min_count is None : return bins
if min_count is False: return bins
# check
if type(min_count) != int: raise IOError('"min_count" must be an integer number')
# keep removing where needed
while True:
P, _ = np.histogram(data, bins=bins, density=False)
idx = np.where(P < min_count)[0]
if len(idx) == 0: return bins
idx = idx[0]
if idx+1 == len(P): bins = np.hstack(( bins[:(idx) ], bins[-1] ))
else : bins = np.hstack(( bins[:(idx+1)], bins[(idx+2):] )) | r'''
Merge bins with right-neighbour until each bin has a minimum number of data-points.
:arguments:
**data** (``<array_like>``)
Input data. The histogram is computed over the flattened array.
**bins** (``<array_like>`` | ``<int>``)
The bin-edges (or the number of bins, automatically converted to equal-sized bins).
**min_count** (``<int>``)
The minimum number of data-points per bin. | train | https://github.com/tdegeus/GooseMPL/blob/16e1e06cbcf7131ac98c03ca7251ce83734ef905/GooseMPL/__init__.py#L871-L906 | null | '''
This module provides some extensions to matplotlib.
:dependencies:
* numpy
* matplotlib
:copyright:
| Tom de Geus
| tom@geus.me
| http://www.geus.me
'''
# ==================================================================================================
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
import os,re,sys
# ==================================================================================================
def find_latex_font_serif():
    r'''
    Find an available font to mimic LaTeX, and return its name
    (``None`` when no candidate font is installed).
    '''
    import os, re
    import matplotlib.font_manager

    # strip a font file path down to the bare font name
    def fontname(path):
        return os.path.splitext(os.path.split(path)[-1])[0].split(' - ')[0]

    available = matplotlib.font_manager.findSystemFonts(fontpaths=None, fontext='ttf')

    # candidate patterns, in order of preference
    patterns = [
        r'.*Computer\ Modern\ Roman.*',
        r'.*CMU\ Serif.*',
        r'.*CMU.*',
        r'.*Times.*',
        r'.*DejaVu.*',
        r'.*Serif.*',
    ]

    for pattern in patterns:
        for path in available:
            if re.match(pattern, path):
                return fontname(path)

    return None
# --------------------------------------------------------------------------------------------------
def copy_style():
    r'''
    Write all goose-styles to the relevant matplotlib configuration directory
    (``stylelib`` inside ``matplotlib.get_configdir()``).
    '''
    import os
    import matplotlib

    # style definitions
    # -----------------

    styles = {}

    styles['goose.mplstyle'] = '''
figure.figsize : 8,6
font.weight : normal
font.size : 16
axes.labelsize : medium
axes.titlesize : medium
xtick.labelsize : small
ytick.labelsize : small
xtick.top : True
ytick.right : True
axes.facecolor : none
axes.prop_cycle : cycler('color',['k', 'r', 'g', 'b', 'y', 'c', 'm'])
legend.fontsize : medium
legend.fancybox : true
legend.columnspacing : 1.0
legend.handletextpad : 0.2
lines.linewidth : 2
image.cmap : afmhot
image.interpolation : nearest
image.origin : lower
savefig.facecolor : none
figure.autolayout : True
errorbar.capsize : 2
'''

    styles['goose-tick-in.mplstyle'] = '''
xtick.direction : in
ytick.direction : in
'''

    styles['goose-tick-lower.mplstyle'] = '''
xtick.top : False
ytick.right : False
axes.spines.top : False
axes.spines.right : False
'''

    # use a Computer-Modern-like serif font when one is installed
    if find_latex_font_serif() is not None:
        # doubled braces are str.format escapes: written as single braces
        styles['goose-latex.mplstyle'] = r'''
font.family : serif
font.serif : {serif:s}
font.weight : bold
font.size : 18
text.usetex : true
text.latex.preamble : \usepackage{{amsmath}},\usepackage{{amsfonts}},\usepackage{{amssymb}},\usepackage{{bm}}
'''.format(serif=find_latex_font_serif())
    else:
        # BUG FIX: this string is NOT passed through str.format, so the braces must not be
        # doubled (doubled braces ended up literally in the written style file)
        styles['goose-latex.mplstyle'] = r'''
font.family : serif
font.weight : bold
font.size : 18
text.usetex : true
text.latex.preamble : \usepackage{amsmath},\usepackage{amsfonts},\usepackage{amssymb},\usepackage{bm}
'''

    # write style definitions
    # -----------------------

    # directory name where the styles are stored
    dirname = os.path.abspath(os.path.join(matplotlib.get_configdir(), 'stylelib'))

    # make directory if it does not yet exist
    if not os.path.isdir(dirname):
        os.makedirs(dirname)

    # write all styles (close each file: the old code leaked the file handles)
    for fname, style in styles.items():
        with open(os.path.join(dirname, fname), 'w') as f:
            f.write(style)
# ==================================================================================================
def set_decade_lims(axis=None, direction=None):
    r'''
    Round the axis limits outward to full decades (powers of ten).

    :options:

      **axis** ([``plt.gca()``] | ...)
        Specify the axis to which to apply the limits.

      **direction** ([``None``] | ``'x'`` | ``'y'``)
        Limit the application to a certain direction (default: both).
    '''
    # get current axis
    if axis is None:
        axis = plt.gca()

    # x-axis: floor/ceil the current limits to the enclosing decades
    if direction is None or direction == 'x':
        lo, hi = axis.get_xlim()
        axis.set_xlim([10 ** (np.floor(np.log10(lo))), 10 ** (np.ceil(np.log10(hi)))])

    # y-axis: idem
    if direction is None or direction == 'y':
        lo, hi = axis.get_ylim()
        axis.set_ylim([10 ** (np.floor(np.log10(lo))), 10 ** (np.ceil(np.log10(hi)))])
# ==================================================================================================
def scale_lim(lim, factor=1.05):
    r'''
    Scale limits symmetrically about their midpoint, e.g. to be 5% wider for a nice plot.

    :arguments:

      **lim** (``<list>`` | ``<str>``)
        The limits. May be a string "[...,...]", which is converted to a list.

    :options:

      **factor** ([``1.05``] | ``<float>``)
        Scale factor.

    :returns:

      The scaled limits (lists are modified in place).
    '''
    # convert string "[...,...]"
    # NOTE(review): eval of an arbitrary string is unsafe; kept for backwards compatibility,
    # do not pass untrusted input here
    if isinstance(lim, str):
        lim = eval(lim)

    # widen both ends by half of the extra width
    extra = (factor - 1.) / 2. * (lim[1] - lim[0])
    lim[0] -= extra
    lim[1] += extra

    return lim
# ==================================================================================================
def abs2rel_x(x, axis=None):
    r'''
    Transform absolute x-coordinates to relative x-coordinates. Relative coordinates correspond to a
    fraction of the relevant axis. Be sure to set the limits and scale before calling this function!

    :arguments:

      **x** (``float``, ``list``)
        Absolute coordinates.

    :options:

      **axis** ([``plt.gca()``] | ...)
        Specify the axis to which to apply the limits.

    :returns:

      **x** (``float``, ``list``)
        Relative coordinates.
    '''
    # get current axis
    if axis is None:
        axis = plt.gca()
    # get current limits
    xmin, xmax = axis.get_xlim()
    # transform
    # - log scale: the fraction is computed in log-space
    if axis.get_xscale() == 'log':
        # the try/except distinguishes list input (first) from scalar input;
        # None entries are passed through unchanged
        try : return [(np.log10(i)-np.log10(xmin))/(np.log10(xmax)-np.log10(xmin)) if i is not None else i for i in x]
        except: return (np.log10(x)-np.log10(xmin))/(np.log10(xmax)-np.log10(xmin))
    # - normal scale: linear fraction
    else:
        try : return [(i-xmin)/(xmax-xmin) if i is not None else i for i in x]
        except: return (x-xmin)/(xmax-xmin)
# ==================================================================================================
def abs2rel_y(y, axis=None):
    r'''
    Transform absolute y-coordinates to relative y-coordinates. Relative coordinates correspond to a
    fraction of the relevant axis. Be sure to set the limits and scale before calling this function!

    :arguments:

      **y** (``float``, ``list``)
        Absolute coordinates.

    :options:

      **axis** ([``plt.gca()``] | ...)
        Specify the axis to which to apply the limits.

    :returns:

      **y** (``float``, ``list``)
        Relative coordinates.
    '''
    # get current axis
    if axis is None:
        axis = plt.gca()
    # get current limits
    ymin, ymax = axis.get_ylim()
    # transform
    # - log scale: the fraction is computed in log-space
    #   (bug fix: the scale was read from the x-axis, "get_xscale", instead of the y-axis)
    if axis.get_yscale() == 'log':
        # the try/except distinguishes list input (first) from scalar input;
        # None entries are passed through unchanged
        try : return [(np.log10(i)-np.log10(ymin))/(np.log10(ymax)-np.log10(ymin)) if i is not None else i for i in y]
        except: return (np.log10(y)-np.log10(ymin))/(np.log10(ymax)-np.log10(ymin))
    # - normal scale: linear fraction
    else:
        try : return [(i-ymin)/(ymax-ymin) if i is not None else i for i in y]
        except: return (y-ymin)/(ymax-ymin)
# ==================================================================================================
def rel2abs_x(x, axis=None):
    r'''
    Transform relative x-coordinates to absolute x-coordinates. Relative coordinates correspond to a
    fraction of the relevant axis. Be sure to set the limits and scale before calling this function!

    :arguments:

      **x** (``float``, ``list``)
        Relative coordinates.

    :options:

      **axis** ([``plt.gca()``] | ...)
        Specify the axis to which to apply the limits.

    :returns:

      **x** (``float``, ``list``)
        Absolute coordinates.
    '''
    # get current axis
    if axis is None:
        axis = plt.gca()
    # get current limits
    xmin, xmax = axis.get_xlim()
    # transform
    # - log scale: interpolate in log-space
    if axis.get_xscale() == 'log':
        # the try/except distinguishes list input (first) from scalar input;
        # None entries are passed through unchanged
        try : return [10.**(np.log10(xmin)+i*(np.log10(xmax)-np.log10(xmin))) if i is not None else i for i in x]
        except: return 10.**(np.log10(xmin)+x*(np.log10(xmax)-np.log10(xmin)))
    # - normal scale: linear interpolation
    else:
        try : return [xmin+i*(xmax-xmin) if i is not None else i for i in x]
        except: return xmin+x*(xmax-xmin)
# ==================================================================================================
def rel2abs_y(y, axis=None):
    r'''
    Transform relative y-coordinates to absolute y-coordinates. Relative coordinates correspond to a
    fraction of the relevant axis. Be sure to set the limits and scale before calling this function!

    :arguments:

      **y** (``float``, ``list``)
        Relative coordinates.

    :options:

      **axis** ([``plt.gca()``] | ...)
        Specify the axis to which to apply the limits.

    :returns:

      **y** (``float``, ``list``)
        Absolute coordinates.
    '''
    # get current axis
    if axis is None:
        axis = plt.gca()
    # get current limits
    ymin, ymax = axis.get_ylim()
    # transform
    # - log scale: interpolate in log-space
    #   (bug fix: the scale was read from the x-axis, "get_xscale", instead of the y-axis)
    if axis.get_yscale() == 'log':
        # the try/except distinguishes list input (first) from scalar input;
        # None entries are passed through unchanged
        try : return [10.**(np.log10(ymin)+i*(np.log10(ymax)-np.log10(ymin))) if i is not None else i for i in y]
        except: return 10.**(np.log10(ymin)+y*(np.log10(ymax)-np.log10(ymin)))
    # - normal scale: linear interpolation
    else:
        try : return [ymin+i*(ymax-ymin) if i is not None else i for i in y]
        except: return ymin+y*(ymax-ymin)
# ==================================================================================================
def subplots(scale_x=None, scale_y=None, scale=None, **kwargs):
    r'''
    Run ``matplotlib.pyplot.subplots`` with ``figsize`` set to the correct multiple of the default.

    :additional options:

      **scale, scale_x, scale_y** (``<float>``)
        Scale the figure-size (along one of the dimensions).
    '''
    # an explicit figsize wins: plain pass-through
    if 'figsize' in kwargs:
        return plt.subplots(**kwargs)

    # start from the rcParams default size
    w, h = mpl.rcParams['figure.figsize']

    if scale is not None:
        w *= scale
        h *= scale
    if scale_x is not None:
        w *= scale_x
    if scale_y is not None:
        h *= scale_y

    # one default-sized panel per row/column
    nrows = kwargs.pop('nrows', 1)
    ncols = kwargs.pop('ncols', 1)

    return plt.subplots(nrows=nrows, ncols=ncols, figsize=(ncols * w, nrows * h), **kwargs)
# ==================================================================================================
def plot(x, y, units='absolute', axis=None, **kwargs):
    r'''
    Plot, optionally with coordinates given as a fraction of the axis.

    :arguments:

      **x, y** (``list``)
        Coordinates.

    :options:

      **units** ([``'absolute'``] | ``'relative'``)
        The type of units in which the coordinates are specified. Relative coordinates correspond
        to a fraction of the relevant axis. If you use relative coordinates, be sure to set the
        limits and scale before calling this function!

      ...
        Any ``plt.plot(...)`` option.

    :returns:

      The handle of the ``plt.plot(...)`` command.
    '''
    if axis is None:
        axis = plt.gca()

    # convert axis-fractions to data coordinates
    if units.lower() == 'relative':
        x = rel2abs_x(x, axis)
        y = rel2abs_y(y, axis)

    return axis.plot(x, y, **kwargs)
# ==================================================================================================
def text(x, y, text, units='absolute', axis=None, **kwargs):
    r'''
    Plot a text, optionally with coordinates given as a fraction of the axis.

    :arguments:

      **x, y** (``float``)
        Coordinates.

      **text** (``str``)
        Text to plot.

    :options:

      **units** ([``'absolute'``] | ``'relative'``)
        The type of units in which the coordinates are specified. Relative coordinates correspond
        to a fraction of the relevant axis. If you use relative coordinates, be sure to set the
        limits and scale before calling this function!

      ...
        Any ``plt.text(...)`` option.

    :returns:

      The handle of the ``plt.text(...)`` command.
    '''
    if axis is None:
        axis = plt.gca()

    # convert axis-fractions to data coordinates
    if units.lower() == 'relative':
        x = rel2abs_x(x, axis)
        y = rel2abs_y(y, axis)

    return axis.text(x, y, text, **kwargs)
# ==================================================================================================
def diagonal_powerlaw(exp, ll=None, lr=None, tl=None, tr=None, width=None, height=None, plot=False, **kwargs):
    r'''
    Set the limits such that a power-law with a certain exponent lies on the diagonal.

    :arguments:

      **exp** (``<float>``)
        The power-law exponent.

      **ll, lr, tl, tr** (``<list>``)
        Coordinates of the lower-left, or the lower-right, or the top-left, or the top-right
        corner (specify exactly one).

      **width, height** (``<float>``)
        Width or the height (specify exactly one).

    :options:

      **axis** ([``plt.gca()``] | ...)
        Specify the axis to which to apply the limits.

      **plot** ([``False``] | ``True``)
        Plot the diagonal.

      ...
        Any ``plt.plot(...)`` option.

    :returns:

      The handle of the ``plt.plot(...)`` command (if any).
    '''
    axis = kwargs.pop('axis', plt.gca())
    # convert the (single) specified size to log-space
    # NOTE(review): the truthiness tests fail for a size whose log is 0 (value exactly 1):
    # "height" then stays None below -- confirm such input cannot occur
    if width and not height: width = np.log(width )
    elif height and not width : height = np.log(height)
    else: raise IOError('Specify "width" or "height"')
    # convert the (single) specified corner to log-space
    if ll and not lr and not tl and not tr: ll = [ np.log(ll[0]), np.log(ll[1]) ]
    elif lr and not ll and not tl and not tr: lr = [ np.log(lr[0]), np.log(lr[1]) ]
    elif tl and not lr and not ll and not tr: tl = [ np.log(tl[0]), np.log(tl[1]) ]
    elif tr and not lr and not tl and not ll: tr = [ np.log(tr[0]), np.log(tr[1]) ]
    else: raise IOError('Specify "ll" or "lr" or "tl" or "tr"')
    axis.set_xscale('log')
    axis.set_yscale('log')
    # couple width and height through the exponent (the diagonal condition)
    if width : height = width * np.abs(exp)
    elif height: width = height / np.abs(exp)
    # apply limits, growing away from the specified corner
    if ll:
        axis.set_xlim(sorted([np.exp(ll[0]), np.exp(ll[0]+width )]))
        axis.set_ylim(sorted([np.exp(ll[1]), np.exp(ll[1]+height)]))
    elif lr:
        axis.set_xlim(sorted([np.exp(lr[0]), np.exp(lr[0]-width )]))
        axis.set_ylim(sorted([np.exp(lr[1]), np.exp(lr[1]+height)]))
    elif tl:
        axis.set_xlim(sorted([np.exp(tl[0]), np.exp(tl[0]+width )]))
        axis.set_ylim(sorted([np.exp(tl[1]), np.exp(tl[1]-height)]))
    elif tr:
        axis.set_xlim(sorted([np.exp(tr[0]), np.exp(tr[0]-width )]))
        axis.set_ylim(sorted([np.exp(tr[1]), np.exp(tr[1]-height)]))
    # optionally draw the diagonal itself (direction depends on the sign of the exponent)
    if plot:
        if exp > 0: return plot_powerlaw(exp, 0., 0., 1., **kwargs)
        else : return plot_powerlaw(exp, 0., 1., 1., **kwargs)
# ==================================================================================================
def annotate_powerlaw(text, exp, startx, starty, width=None, rx=0.5, ry=0.5, **kwargs):
    r'''
Add a label to the middle of a power-law annotation (see ``goosempl.plot_powerlaw``).

:arguments:

  **text** (``str``)
    The label text.

  **exp** (``float``)
    The power-law exponent.

  **startx, starty** (``float``)
    Start coordinates.

:options:

  **width, height, endx, endy** (``float``)
    Definition of the end coordinate (only one of these options is needed).

  **rx, ry** (``float``)
    Relative position of the label along the line in the x- and y-direction
    (0 = start, 1 = end, 0.5 = middle).

  **units** ([``'relative'``] | ``'absolute'``)
    The type of units in which the coordinates are specified. Relative coordinates correspond to a
    fraction of the relevant axis. If you use relative coordinates, be sure to set the limits and
    scale before calling this function!

  **axis** ([``plt.gca()``] | ...)
    Specify the axis to which to apply the limits.

  ...
    Any ``plt.text(...)`` option.

:returns:

  The handle of the ``plt.text(...)`` command.
    '''

    # get options/defaults
    endx   = kwargs.pop('endx'  , None      )
    endy   = kwargs.pop('endy'  , None      )
    height = kwargs.pop('height', None      )
    units  = kwargs.pop('units' , 'relative')
    # resolve the axis lazily: using "plt.gca()" as the pop-default would be evaluated
    # eagerly and create a new figure even when "axis" is explicitly supplied
    axis   = kwargs.pop('axis'  , None      )
    if axis is None:
        axis = plt.gca()

    # check: the power-law is a straight line only on a log-log scale
    if axis.get_xscale() != 'log' or axis.get_yscale() != 'log':
        raise IOError('This function only works on a log-log scale, where the power-law is a straight line')

    # convert "width"/"height" into an end coordinate in one direction
    if width is not None:
        endx = startx + width
        endy = None
    elif height is not None:
        if   exp >  0: endy = starty + height
        elif exp == 0: endy = starty
        else         : endy = starty - height
        endx = None

    # transform relative axis-coordinates -> absolute data-coordinates
    if units.lower() == 'relative':
        [startx, endx] = rel2abs_x([startx, endx], axis)
        [starty, endy] = rel2abs_y([starty, endy], axis)

    # multiplication constant such that y = const * x**exp passes through the start point
    const = starty / ( startx**exp )

    # complete the missing end coordinate
    if endx is not None: endy = const * endx**exp
    else               : endx = ( endy / const )**( 1./exp )

    # label position: logarithmic interpolation between start and end
    x = 10. ** ( np.log10(startx) + rx * ( np.log10(endx) - np.log10(startx) ) )
    y = 10. ** ( np.log10(starty) + ry * ( np.log10(endy) - np.log10(starty) ) )

    # plot
    return axis.text(x, y, text, **kwargs)
# ==================================================================================================
def plot_powerlaw(exp, startx, starty, width=None, **kwargs):
    r'''
Plot a power-law: a straight line on a log-log plot.

:arguments:

  **exp** (``float``)
    The power-law exponent.

  **startx, starty** (``float``)
    Start coordinates.

:options:

  **width, height, endx, endy** (``float``)
    Definition of the end coordinate (only one of these options is needed).

  **units** ([``'relative'``] | ``'absolute'``)
    The type of units in which the coordinates are specified. Relative coordinates correspond to a
    fraction of the relevant axis. If you use relative coordinates, be sure to set the limits and
    scale before calling this function!

  **axis** ([``plt.gca()``] | ...)
    Specify the axis to which to apply the limits.

  ...
    Any ``plt.plot(...)`` option.

:returns:

  The handle of the ``plt.plot(...)`` command.
    '''

    # get options/defaults
    endx   = kwargs.pop('endx'  , None      )
    endy   = kwargs.pop('endy'  , None      )
    height = kwargs.pop('height', None      )
    units  = kwargs.pop('units' , 'relative')
    # resolve the axis lazily: using "plt.gca()" as the pop-default would be evaluated
    # eagerly and create a new figure even when "axis" is explicitly supplied
    axis   = kwargs.pop('axis'  , None      )
    if axis is None:
        axis = plt.gca()

    # check: the power-law is a straight line only on a log-log scale
    if axis.get_xscale() != 'log' or axis.get_yscale() != 'log':
        raise IOError('This function only works on a log-log scale, where the power-law is a straight line')

    # convert "width"/"height" into an end coordinate in one direction
    if width is not None:
        endx = startx + width
        endy = None
    elif height is not None:
        if   exp >  0: endy = starty + height
        elif exp == 0: endy = starty
        else         : endy = starty - height
        endx = None

    # transform relative axis-coordinates -> absolute data-coordinates
    if units.lower() == 'relative':
        [startx, endx] = rel2abs_x([startx, endx], axis)
        [starty, endy] = rel2abs_y([starty, endy], axis)

    # multiplication constant such that y = const * x**exp passes through the start point
    const = starty / ( startx**exp )

    # complete the missing end coordinate
    if endx is not None: endy = const * endx**exp
    else               : endx = ( endy / const )**( 1./exp )

    # plot
    return axis.plot([startx, endx], [starty, endy], **kwargs)
# ==================================================================================================
def grid_powerlaw(exp, insert=0, skip=0, end=-1, step=0, axis=None, **kwargs):
    r'''
Draw a power-law grid: a grid that respects a certain power-law exponent. The grid-lines start from
the positions of the ticks.

:arguments:

  **exp** (``float``)
    The power-law exponent.

:options:

  **insert** (``<int>``)
    Insert extra lines in between the default lines set by the tick positions.

  **skip, end, step** (``<int>``)
    Select from the lines based on ``coor = coor[skip:end:step]``.

  **axis** ([``plt.gca()``] | ...)
    Specify the axis to which to apply the limits.

  ...
    Any ``plt.plot(...)`` option.

:returns:

  The handle of the ``plt.plot(...)`` command.
    '''

    # default axis
    if axis is None: axis = plt.gca()

    # default plot settings
    kwargs.setdefault('color'    , 'k' )
    kwargs.setdefault('linestyle', '--')
    kwargs.setdefault('linewidth', 1   )

    # check: the grid-lines are straight only on a log-log scale
    if axis.get_xscale() != 'log' or axis.get_yscale() != 'log':
        raise IOError('This function only works on a log-log scale, where the power-law is a straight line')

    # zero-exponent: draw horizontal lines
    if exp == 0:

        # y-coordinate of the start positions, in relative (0..1) axis coordinates
        starty = abs2rel_y(axis.get_yticks(), axis=axis)

        # insert extra coordinates: linear interpolation between the tick positions
        if insert > 0:
            n      = len(starty)
            x      = np.linspace(0,1,n+(n-1)*int(insert))
            xp     = np.linspace(0,1,n)
            starty = np.interp(x, xp, starty)

        # skip coordinates (note: the default end=-1 drops the last line)
        starty = starty[int(skip):int(end):int(1+step)]

        # set remaining coordinates: horizontal lines spanning the full x-range
        endy   = starty
        startx = np.zeros((len(starty)))
        endx   = np.ones ((len(starty)))

    # all other exponents
    else:

        # get the axis' size in real coordinates
        # - get the limits
        xmin, xmax = axis.get_xlim()
        ymin, ymax = axis.get_ylim()
        # - compute the size in both directions, in decades
        deltax = np.log10(xmax) - np.log10(xmin)
        deltay = np.log10(ymax) - np.log10(ymin)

        # convert the exponent in real coordinates to an exponent in relative coordinates
        b = np.abs(exp) * deltax / deltay

        # x-coordinate of the start positions, in relative (0..1) axis coordinates
        startx = abs2rel_x(axis.get_xticks(), axis=axis)

        # compute how many lines need to be prepended so that lines entering the view
        # from the left are also drawn
        # NOTE(review): assumes at least two ticks, evenly spaced in relative coordinates
        Dx   = startx[1] - startx[0]
        nneg = int(np.floor(1./(b*Dx))) - 1

        # add one extra to be sure (the interpolation below tightens the spacing)
        if insert > 0:
            nneg += 1

        # prepend the extra start positions, keeping the spacing Dx
        if nneg > 0:
            startx = np.hstack(( startx[0]+np.cumsum(-Dx * np.ones((nneg)))[::-1], startx ))

        # insert extra coordinates: linear interpolation between the existing positions
        if insert > 0:
            n      = len(startx)
            x      = np.linspace(0,1,n+(n-1)*int(insert))
            xp     = np.linspace(0,1,n)
            startx = np.interp(x, xp, startx)

        # skip coordinates
        if step > 0:
            startx = startx[int(skip)::int(1+step)]

        # x-coordinate of the end of the lines: each line spans the full y-range,
        # which corresponds to a horizontal run of 1/b in relative coordinates
        endx = startx + 1/b

        # y-coordinate of the start and the end of the lines: bottom-to-top for a
        # positive exponent, top-to-bottom for a negative exponent
        if exp > 0:
            starty = np.zeros((len(startx)))
            endy   = np.ones ((len(startx)))
        else:
            starty = np.ones ((len(startx)))
            endy   = np.zeros((len(startx)))

    # convert relative coordinates back to real (data) coordinates
    startx = rel2abs_x(startx, axis)
    endx   = rel2abs_x(endx  , axis)
    starty = rel2abs_y(starty, axis)
    endy   = rel2abs_y(endy  , axis)

    # plot all lines in one call (one column per line)
    lines = axis.plot(np.vstack(( startx, endx )), np.vstack(( starty, endy )), **kwargs)

    # suppress legend entries for all lines but the first
    plt.setp(lines[1:], label="_")

    # return handles
    return lines
# ==================================================================================================
def histogram_bin_edges_minwidth(min_width, bins):
    r'''
Merge bins with their right neighbour until every bin is at least ``min_width`` wide.

:arguments:

  **bins** (``<array_like>``)
    The bin-edges.

  **min_width** (``<float>``)
    The minimum bin width.
    '''

    # nothing requested: hand back the input untouched
    if min_width is None or min_width is False:
        return bins

    edges = bins

    # repeatedly remove the edge that closes the first too-narrow bin
    while True:

        narrow = np.where(np.diff(edges) < min_width)[0]

        # all bins wide enough: done
        if narrow.size == 0:
            return edges

        first = narrow[0]

        if first + 1 == len(edges) - 1:
            # the narrow bin is the last one: merge it into its left neighbour,
            # keeping the outermost edge
            edges = np.hstack(( edges[:first], edges[-1] ))
        else:
            # drop the right edge of the narrow bin, merging it with the bin to its right
            edges = np.hstack(( edges[:(first+1)], edges[(first+2):] ))
# ==================================================================================================
# ==================================================================================================
def histogram_bin_edges(data, bins=10, mode='equal', min_count=None, integer=False, remove_empty_edges=True, min_width=None):
    r'''
Determine bin-edges.

:arguments:

  **data** (``<array_like>``)
    Input data. The histogram is computed over the flattened array.

:options:

  **bins** ([``10``] | ``<int>``)
    The number of bins.

  **mode** ([``'equal'``] | ``<str>``)
    Mode with which to compute the bin-edges:
    * ``'equal'``: each bin has equal width.
    * ``'log'``: logarithmic spacing.
    * ``'uniform'``: uniform number of data-points per bin.

  **min_count** (``<int>``)
    The minimum number of data-points per bin.

  **min_width** (``<float>``)
    The minimum width of each bin.

  **integer** ([``False``] | [``True``])
    If ``True``, bins not encompassing an integer are removed
    (e.g. a bin with edges ``[1.1, 1.9]`` is removed, but ``[0.9, 1.1]`` is not removed).

  **remove_empty_edges** ([``True``] | [``False``])
    Remove empty bins at the beginning or the end.

:returns:

  **bin_edges** (``<array of dtype float>``)
    The edges to pass into histogram.
    '''

    # determine the bin-edges
    if mode == 'equal':
        bin_edges = np.linspace(np.min(data), np.max(data), bins+1)
    elif mode == 'log':
        bin_edges = np.logspace(np.log10(np.min(data)), np.log10(np.max(data)), bins+1)
    elif mode == 'uniform':
        # - only a bin count makes sense for this mode
        if hasattr(bins, "__len__"):
            raise IOError('Only the number of bins can be specified')
        # - use the minimum count to estimate the number of bins
        if min_count is not None and min_count is not False:
            if type(min_count) != int: raise IOError('"min_count" must be an integer number')
            bins = int(np.floor(float(len(data))/float(min_count)))
        # - number of data-points in each bin (equal for each)
        count = int(np.floor(float(len(data))/float(bins))) * np.ones(bins, dtype='int')
        # - increase the number of data-points by one in as many bins as needed,
        #   such that the total fits the total number of data-points
        #   (FIX: "np.int" was removed in NumPy >= 1.24; use the builtin "int")
        count[np.linspace(0, bins-1, len(data)-np.sum(count)).astype(int)] += 1
        # - split the data: index of the first data-point of each bin
        idx     = np.empty((bins+1), dtype='int')
        idx[0 ] = 0
        idx[1:] = np.cumsum(count)
        idx[-1] = len(data) - 1
        # - determine the bin-edges from the sorted data
        bin_edges = np.unique(np.sort(data)[idx])
    else:
        raise IOError('Unknown option')

    # remove empty starting and ending bin (related to an unfortunate choice of bin-edges)
    if remove_empty_edges:
        N, _ = np.histogram(data, bins=bin_edges, density=False)
        idx  = np.min(np.where(N > 0)[0])
        jdx  = np.max(np.where(N > 0)[0])
        bin_edges = bin_edges[(idx):(jdx+2)]

    # merge bins with too few data-points (if needed)
    bin_edges = histogram_bin_edges_mincount(data, min_count=min_count, bins=bin_edges)

    # merge bins that have too small of a width
    bin_edges = histogram_bin_edges_minwidth(min_width=min_width, bins=bin_edges)

    # select only bins that encompass an integer (and retain the original bounds)
    if integer:
        idx       = np.where(np.diff(np.floor(bin_edges)) >= 1)[0]
        idx       = np.unique(np.hstack((0, idx, len(bin_edges)-1)))
        bin_edges = bin_edges[idx]

    # return
    return bin_edges
# ==================================================================================================
def histogram(data, return_edges=True, **kwargs):
    r'''
Compute a histogram.

See `numpy.histogram <https://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html>`_

:extra options:

  **return_edges** ([``True``] | [``False``])
    Return the bin edges if set to ``True``, return their midpoints otherwise.
    '''

    # delegate the actual binning to NumPy
    counts, edges = np.histogram(data, **kwargs)

    # default output: counts with the bin-edges
    if return_edges:
        return counts, edges

    # otherwise: counts with the midpoint of each bin
    midpoints = np.diff(edges) / 2. + edges[:-1]

    return counts, midpoints
# ==================================================================================================
def histogram_cumulative(data, **kwargs):
    r'''
Compute a cumulative histogram.

See `numpy.histogram <https://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html>`_

:extra options:

  **return_edges** ([``True``] | [``False``])
    Return the bin edges if set to ``True``, return their midpoints otherwise.

  **normalize** ([``False``] | ``True``)
    Normalize such that the final probability is one. In this case the function returns the (binned)
    cumulative probability density.
    '''

    # extract the local options before forwarding the rest to NumPy
    return_edges = kwargs.pop('return_edges', True)
    normalize    = kwargs.pop('normalize'   , False)

    counts, edges = np.histogram(data, **kwargs)

    # running total over the bins
    cumulative = np.cumsum(counts)

    # scale such that the last entry equals one
    if normalize:
        cumulative = cumulative / cumulative[-1]

    # midpoints instead of edges, if requested
    if not return_edges:
        edges = np.diff(edges) / 2. + edges[:-1]

    return cumulative, edges
# ==================================================================================================
def hist(P, edges, **kwargs):
    r'''
Plot a histogram as a collection of rectangular patches (one per bin).

:arguments:

  **P** (``<array_like>``)
    Count (or density) per bin.

  **edges** (``<array_like>``)
    The bin-edges (one entry more than ``P``).

:options:

  **axis** ([``plt.gca()``] | ...)
    Specify the axis to plot in.

  **cindex** (``<numpy.ndarray>``)
    Array with, for each patch, the value that should be indexed to a color.

  **autoscale** ([``True``] | ``False``)
    Automatically update the limits of the plot.

  ...
    Any ``matplotlib.collections.PatchCollection`` option.

:returns:

  The handle of the patch objects.
    '''

    from matplotlib.collections import PatchCollection
    from matplotlib.patches     import Polygon

    # extract local options
    # (resolve the axis lazily: "plt.gca()" as a pop-default is evaluated eagerly and
    # would create a new figure even when "axis" is explicitly supplied)
    axis      = kwargs.pop('axis'     , None)
    cindex    = kwargs.pop('cindex'   , None)
    autoscale = kwargs.pop('autoscale', True)
    if axis is None:
        axis = plt.gca()

    # set defaults
    kwargs.setdefault('edgecolor', 'k')

    # no color-index -> set transparent
    if cindex is None:
        kwargs.setdefault('facecolor', (0., 0., 0., 0.))

    # one rectangular Polygon per bin
    poly = [
        Polygon(np.array([
            [xl, 0.],
            [xu, 0.],
            [xu, p ],
            [xl, p ],
        ]))
        for p, xl, xu in zip(P, edges[:-1], edges[1:])
    ]

    # convert patches -> matplotlib-object
    # (pass the list directly: the former "args = (poly)" was not a tuple anyway)
    collection = PatchCollection(poly, **kwargs)

    # add colors to patches
    if cindex is not None:
        collection.set_array(cindex)

    # add patches to axis
    axis.add_collection(collection)

    # rescale the axes manually
    # (automatic limits of Collections are not supported by matplotlib)
    if autoscale:
        # - get limits
        xlim = [edges[0], edges[-1]]
        ylim = [0       , np.max(P)]
        # - set limits +/- 10% extra margin
        axis.set_xlim([xlim[0]-.1*(xlim[1]-xlim[0]), xlim[1]+.1*(xlim[1]-xlim[0])])
        axis.set_ylim([ylim[0]-.1*(ylim[1]-ylim[0]), ylim[1]+.1*(ylim[1]-ylim[0])])

    return collection
# ==================================================================================================
def cdf(data, mode='continuous', **kwargs):
    '''
Return the cumulative density of a data set.

:arguments:

  **data** (``<numpy.ndarray>``)
    Input data, to plot the distribution for.

:returns:

  **P** (``<numpy.ndarray>``)
    Cumulative probability.

  **x** (``<numpy.ndarray>``)
    Data points (sorted ascending).

.. note::

  The ``mode`` option is currently unused; it is kept for interface compatibility.
    '''

    # sorted data are the x-coordinates; the cumulative probability rises
    # linearly from 0 to 1 over the sorted samples
    x = np.sort(data)
    P = np.linspace(0.0, 1.0, len(x))

    return (P, x)
# ==================================================================================================
def patch(*args, **kwargs):
    '''
Add patches to plot. The color of the patches is indexed according to a specified color-index.

:example:

  Plot a finite element mesh: the outline of the undeformed configuration, and the deformed
  configuration for which the elements get a color e.g. based on stress::

    import matplotlib.pyplot as plt
    import goosempl as gplt

    fig,ax = plt.subplots()

    p = gplt.patch(coor=coor+disp,conn=conn,axis=ax,cindex=stress,cmap='YlOrRd',edgecolor=None)
    _ = gplt.patch(coor=coor     ,conn=conn,axis=ax)

    cbar = fig.colorbar(p,axis=ax,aspect=10)

    plt.show()

:arguments - option 1/2:

  **patches** (``<list>``)
    List with patch objects. Can be replaced by specifying ``coor`` and ``conn``.

:arguments - option 2/2:

  **coor** (``<numpy.ndarray>`` | ``<list>`` (nested))
    Matrix with on each row the coordinates (positions) of each node.

  **conn** (``<numpy.ndarray>`` | ``<list>`` (nested))
    Matrix with on each row the node numbers (rows in ``coor``) which form an element (patch).

:options:

  **cindex** (``<numpy.ndarray>``)
    Array with, for each patch, the value that should be indexed to a color.

  **axis** (``<matplotlib>``)
    Specify an axis to include to plot in. By default the current axis is used.

  **autoscale** ([``True``] | ``False``)
    Automatically update the limits of the plot (currently automatic limits of Collections are not
    supported by matplotlib). Only possible when ``coor`` is specified.

:recommended options:

  **cmap** (``<str>`` | ...)
    Specify a colormap.

  **linewidth** (``<float>``)
    Width of the edges.

  **edgecolor** (``<str>`` | ...)
    Color of the edges.

  **clim** (``(<float>,<float>)``)
    Lower and upper limit of the color-axis.

:returns:

  **handle** (``<matplotlib>``)
    Handle of the patch objects.

.. seealso::

  * `matplotlib example
    <http://matplotlib.org/examples/api/patch_collection.html>`_.
    '''

    from matplotlib.collections import PatchCollection
    from matplotlib.patches     import Polygon

    # extract local options
    # (resolve the axis lazily: "plt.gca()" as a pop-default is evaluated eagerly and
    # would create a new figure even when "axis" is explicitly supplied)
    axis      = kwargs.pop('axis'     , None)
    cindex    = kwargs.pop('cindex'   , None)
    coor      = kwargs.pop('coor'     , None)
    conn      = kwargs.pop('conn'     , None)
    autoscale = kwargs.pop('autoscale', True)
    if axis is None:
        axis = plt.gca()

    # check dependent options: either a list of patches is given positionally
    # (option 1/2), or both "coor" and "conn" are given (option 2/2)
    # (FIX: the old code raised even when patches were supplied, making option 1 unusable)
    if len(args) == 0 and (coor is None or conn is None):
        raise IOError('Specify both "coor" and "conn"')

    # set defaults
    kwargs.setdefault('edgecolor', 'k')

    # no color-index -> set transparent
    if cindex is None:
        kwargs.setdefault('facecolor', (0., 0., 0., 0.))

    # convert mesh -> list of Polygons; otherwise use the patches given positionally
    if coor is not None and conn is not None:
        poly = [Polygon(coor[iconn, :]) for iconn in conn]
    else:
        # (FIX: the old "tuple(poly, *args)" raised TypeError for positional arguments)
        poly = list(args[0]) if len(args) == 1 else list(args)

    # convert patches -> matplotlib-object
    p = PatchCollection(poly, **kwargs)

    # add colors to patches
    if cindex is not None:
        p.set_array(cindex)

    # add patches to axis
    axis.add_collection(p)

    # rescale the axes manually: only possible when the coordinates are known
    if autoscale and coor is not None:
        # - get limits
        xlim = [np.min(coor[:, 0]), np.max(coor[:, 0])]
        ylim = [np.min(coor[:, 1]), np.max(coor[:, 1])]
        # - set limits +/- 10% extra margin
        axis.set_xlim([xlim[0]-.1*(xlim[1]-xlim[0]), xlim[1]+.1*(xlim[1]-xlim[0])])
        axis.set_ylim([ylim[0]-.1*(ylim[1]-ylim[0]), ylim[1]+.1*(ylim[1]-ylim[0])])

    return p
# ==================================================================================================
|
tdegeus/GooseMPL | GooseMPL/__init__.py | histogram_bin_edges | python | def histogram_bin_edges(data, bins=10, mode='equal', min_count=None, integer=False, remove_empty_edges=True, min_width=None):
r'''
Determine bin-edges.
:arguments:
**data** (``<array_like>``)
Input data. The histogram is computed over the flattened array.
:options:
**bins** ([``10``] | ``<int>``)
The number of bins.
**mode** ([``'equal'`` | ``<str>``)
Mode with which to compute the bin-edges:
* ``'equal'``: each bin has equal width.
* ``'log'``: logarithmic spacing.
* ``'uniform'``: uniform number of data-points per bin.
**min_count** (``<int>``)
The minimum number of data-points per bin.
**min_width** (``<float>``)
The minimum width of each bin.
**integer** ([``False``] | [``True``])
If ``True``, bins not encompassing an integer are removed
(e.g. a bin with edges ``[1.1, 1.9]`` is removed, but ``[0.9, 1.1]`` is not removed).
**remove_empty_edges** ([``True``] | [``False``])
Remove empty bins at the beginning or the end.
:returns:
**bin_edges** (``<array of dtype float>``)
The edges to pass into histogram.
'''
# determine the bin-edges
if mode == 'equal':
bin_edges = np.linspace(np.min(data),np.max(data),bins+1)
elif mode == 'log':
bin_edges = np.logspace(np.log10(np.min(data)),np.log10(np.max(data)),bins+1)
elif mode == 'uniform':
# - check
if hasattr(bins, "__len__"):
raise IOError('Only the number of bins can be specified')
# - use the minimum count to estimate the number of bins
if min_count is not None and min_count is not False:
if type(min_count) != int: raise IOError('"min_count" must be an integer number')
bins = int(np.floor(float(len(data))/float(min_count)))
# - number of data-points in each bin (equal for each)
count = int(np.floor(float(len(data))/float(bins))) * np.ones(bins, dtype='int')
# - increase the number of data-points by one is an many bins as needed,
# such that the total fits the total number of data-points
count[np.linspace(0, bins-1, len(data)-np.sum(count)).astype(np.int)] += 1
# - split the data
idx = np.empty((bins+1), dtype='int')
idx[0 ] = 0
idx[1:] = np.cumsum(count)
idx[-1] = len(data) - 1
# - determine the bin-edges
bin_edges = np.unique(np.sort(data)[idx])
else:
raise IOError('Unknown option')
# remove empty starting and ending bin (related to an unfortunate choice of bin-edges)
if remove_empty_edges:
N, _ = np.histogram(data, bins=bin_edges, density=False)
idx = np.min(np.where(N>0)[0])
jdx = np.max(np.where(N>0)[0])
bin_edges = bin_edges[(idx):(jdx+2)]
# merge bins with too few data-points (if needed)
bin_edges = histogram_bin_edges_mincount(data, min_count=min_count, bins=bin_edges)
# merge bins that have too small of a width
bin_edges = histogram_bin_edges_minwidth(min_width=min_width, bins=bin_edges)
# select only bins that encompass an integer (and retain the original bounds)
if integer:
idx = np.where(np.diff(np.floor(bin_edges))>=1)[0]
idx = np.unique(np.hstack((0, idx, len(bin_edges)-1)))
bin_edges = bin_edges[idx]
# return
return bin_edges | r'''
Determine bin-edges.
:arguments:
**data** (``<array_like>``)
Input data. The histogram is computed over the flattened array.
:options:
**bins** ([``10``] | ``<int>``)
The number of bins.
**mode** ([``'equal'`` | ``<str>``)
Mode with which to compute the bin-edges:
* ``'equal'``: each bin has equal width.
* ``'log'``: logarithmic spacing.
* ``'uniform'``: uniform number of data-points per bin.
**min_count** (``<int>``)
The minimum number of data-points per bin.
**min_width** (``<float>``)
The minimum width of each bin.
**integer** ([``False``] | [``True``])
If ``True``, bins not encompassing an integer are removed
(e.g. a bin with edges ``[1.1, 1.9]`` is removed, but ``[0.9, 1.1]`` is not removed).
**remove_empty_edges** ([``True``] | [``False``])
Remove empty bins at the beginning or the end.
:returns:
**bin_edges** (``<array of dtype float>``)
The edges to pass into histogram. | train | https://github.com/tdegeus/GooseMPL/blob/16e1e06cbcf7131ac98c03ca7251ce83734ef905/GooseMPL/__init__.py#L910-L1020 | [
"def histogram_bin_edges_minwidth(min_width, bins):\n r'''\nMerge bins with right-neighbour until each bin has a minimum width.\n\n:arguments:\n\n **bins** (``<array_like>``)\n The bin-edges.\n\n **min_width** (``<float>``)\n The minimum bin width.\n '''\n\n # escape\n if min_width is None : return bins\n if min_width is False: return bins\n\n # keep removing where needed\n while True:\n\n idx = np.where(np.diff(bins) < min_width)[0]\n\n if len(idx) == 0: return bins\n\n idx = idx[0]\n\n if idx+1 == len(bins)-1: bins = np.hstack(( bins[:(idx) ], bins[-1] ))\n else : bins = np.hstack(( bins[:(idx+1)], bins[(idx+2):] ))\n",
"def histogram_bin_edges_mincount(data, min_count, bins):\n r'''\nMerge bins with right-neighbour until each bin has a minimum number of data-points.\n\n:arguments:\n\n **data** (``<array_like>``)\n Input data. The histogram is computed over the flattened array.\n\n **bins** (``<array_like>`` | ``<int>``)\n The bin-edges (or the number of bins, automatically converted to equal-sized bins).\n\n **min_count** (``<int>``)\n The minimum number of data-points per bin.\n '''\n\n # escape\n if min_count is None : return bins\n if min_count is False: return bins\n\n # check\n if type(min_count) != int: raise IOError('\"min_count\" must be an integer number')\n\n # keep removing where needed\n while True:\n\n P, _ = np.histogram(data, bins=bins, density=False)\n\n idx = np.where(P < min_count)[0]\n\n if len(idx) == 0: return bins\n\n idx = idx[0]\n\n if idx+1 == len(P): bins = np.hstack(( bins[:(idx) ], bins[-1] ))\n else : bins = np.hstack(( bins[:(idx+1)], bins[(idx+2):] ))\n"
] | '''
This module provides some extensions to matplotlib.
:dependencies:
* numpy
* matplotlib
:copyright:
| Tom de Geus
| tom@geus.me
| http://www.geus.me
'''
# ==================================================================================================
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
import os,re,sys
# ==================================================================================================
def find_latex_font_serif():
r'''
Find an available font to mimic LaTeX, and return its name.
'''
import os, re
import matplotlib.font_manager
name = lambda font: os.path.splitext(os.path.split(font)[-1])[0].split(' - ')[0]
fonts = matplotlib.font_manager.findSystemFonts(fontpaths=None, fontext='ttf')
matches = [
r'.*Computer\ Modern\ Roman.*',
r'.*CMU\ Serif.*',
r'.*CMU.*',
r'.*Times.*',
r'.*DejaVu.*',
r'.*Serif.*',
]
for match in matches:
for font in fonts:
if re.match(match,font):
return name(font)
return None
# --------------------------------------------------------------------------------------------------
def copy_style():
r'''
Write all goose-styles to the relevant matplotlib configuration directory.
'''
import os
import matplotlib
# style definitions
# -----------------
styles = {}
styles['goose.mplstyle'] = '''
figure.figsize : 8,6
font.weight : normal
font.size : 16
axes.labelsize : medium
axes.titlesize : medium
xtick.labelsize : small
ytick.labelsize : small
xtick.top : True
ytick.right : True
axes.facecolor : none
axes.prop_cycle : cycler('color',['k', 'r', 'g', 'b', 'y', 'c', 'm'])
legend.fontsize : medium
legend.fancybox : true
legend.columnspacing : 1.0
legend.handletextpad : 0.2
lines.linewidth : 2
image.cmap : afmhot
image.interpolation : nearest
image.origin : lower
savefig.facecolor : none
figure.autolayout : True
errorbar.capsize : 2
'''
styles['goose-tick-in.mplstyle'] = '''
xtick.direction : in
ytick.direction : in
'''
styles['goose-tick-lower.mplstyle'] = '''
xtick.top : False
ytick.right : False
axes.spines.top : False
axes.spines.right : False
'''
if find_latex_font_serif() is not None:
styles['goose-latex.mplstyle'] = r'''
font.family : serif
font.serif : {serif:s}
font.weight : bold
font.size : 18
text.usetex : true
text.latex.preamble : \usepackage{{amsmath}},\usepackage{{amsfonts}},\usepackage{{amssymb}},\usepackage{{bm}}
'''.format(serif=find_latex_font_serif())
else:
styles['goose-latex.mplstyle'] = r'''
font.family : serif
font.weight : bold
font.size : 18
text.usetex : true
text.latex.preamble : \usepackage{{amsmath}},\usepackage{{amsfonts}},\usepackage{{amssymb}},\usepackage{{bm}}
'''
# write style definitions
# -----------------------
# directory name where the styles are stored
dirname = os.path.abspath(os.path.join(matplotlib.get_configdir(), 'stylelib'))
# make directory if it does not yet exist
if not os.path.isdir(dirname): os.makedirs(dirname)
# write all styles
for fname, style in styles.items():
open(os.path.join(dirname, fname),'w').write(style)
# ==================================================================================================
def set_decade_lims(axis=None,direction=None):
r'''
Set limits the the floor/ceil values in terms of decades.
:options:
**axis** ([``plt.gca()``] | ...)
Specify the axis to which to apply the limits.
**direction** ([``None``] | ``'x'`` | ``'y'``)
Limit the application to a certain direction (default: both).
'''
# get current axis
if axis is None:
axis = plt.gca()
# x-axis
if direction is None or direction == 'x':
# - get current limits
MIN,MAX = axis.get_xlim()
# - floor/ceil to full decades
MIN = 10 ** ( np.floor(np.log10(MIN)) )
MAX = 10 ** ( np.ceil (np.log10(MAX)) )
# - apply
axis.set_xlim([MIN,MAX])
# y-axis
if direction is None or direction == 'y':
# - get current limits
MIN,MAX = axis.get_ylim()
# - floor/ceil to full decades
MIN = 10 ** ( np.floor(np.log10(MIN)) )
MAX = 10 ** ( np.ceil (np.log10(MAX)) )
# - apply
axis.set_ylim([MIN,MAX])
# ==================================================================================================
def scale_lim(lim,factor=1.05):
r'''
Scale limits to be 5% wider, to have a nice plot.
:arguments:
**lim** (``<list>`` | ``<str>``)
The limits. May be a string "[...,...]", which is converted to a list.
:options:
**factor** ([``1.05``] | ``<float>``)
Scale factor.
'''
# convert string "[...,...]"
if type(lim) == str: lim = eval(lim)
# scale limits
D = lim[1] - lim[0]
lim[0] -= (factor-1.)/2. * D
lim[1] += (factor-1.)/2. * D
return lim
# ==================================================================================================
def abs2rel_x(x, axis=None):
r'''
Transform absolute x-coordinates to relative x-coordinates. Relative coordinates correspond to a
fraction of the relevant axis. Be sure to set the limits and scale before calling this function!
:arguments:
**x** (``float``, ``list``)
Absolute coordinates.
:options:
**axis** ([``plt.gca()``] | ...)
Specify the axis to which to apply the limits.
:returns:
**x** (``float``, ``list``)
Relative coordinates.
'''
# get current axis
if axis is None:
axis = plt.gca()
# get current limits
xmin, xmax = axis.get_xlim()
# transform
# - log scale
if axis.get_xscale() == 'log':
try : return [(np.log10(i)-np.log10(xmin))/(np.log10(xmax)-np.log10(xmin)) if i is not None else i for i in x]
except: return (np.log10(x)-np.log10(xmin))/(np.log10(xmax)-np.log10(xmin))
# - normal scale
else:
try : return [(i-xmin)/(xmax-xmin) if i is not None else i for i in x]
except: return (x-xmin)/(xmax-xmin)
# ==================================================================================================
def abs2rel_y(y, axis=None):
r'''
Transform absolute y-coordinates to relative y-coordinates. Relative coordinates correspond to a
fraction of the relevant axis. Be sure to set the limits and scale before calling this function!
:arguments:
**y** (``float``, ``list``)
Absolute coordinates.
:options:
**axis** ([``plt.gca()``] | ...)
Specify the axis to which to apply the limits.
:returns:
**y** (``float``, ``list``)
Relative coordinates.
'''
# get current axis
if axis is None:
axis = plt.gca()
# get current limits
ymin, ymax = axis.get_ylim()
# transform
# - log scale
if axis.get_xscale() == 'log':
try : return [(np.log10(i)-np.log10(ymin))/(np.log10(ymax)-np.log10(ymin)) if i is not None else i for i in y]
except: return (np.log10(y)-np.log10(ymin))/(np.log10(ymax)-np.log10(ymin))
# - normal scale
else:
try : return [(i-ymin)/(ymax-ymin) if i is not None else i for i in y]
except: return (y-ymin)/(ymax-ymin)
# ==================================================================================================
def rel2abs_x(x, axis=None):
r'''
Transform relative x-coordinates to absolute x-coordinates. Relative coordinates correspond to a
fraction of the relevant axis. Be sure to set the limits and scale before calling this function!
:arguments:
**x** (``float``, ``list``)
Relative coordinates.
:options:
**axis** ([``plt.gca()``] | ...)
Specify the axis to which to apply the limits.
:returns:
**x** (``float``, ``list``)
Absolute coordinates.
'''
# get current axis
if axis is None:
axis = plt.gca()
# get current limits
xmin, xmax = axis.get_xlim()
# transform
# - log scale
if axis.get_xscale() == 'log':
try : return [10.**(np.log10(xmin)+i*(np.log10(xmax)-np.log10(xmin))) if i is not None else i for i in x]
except: return 10.**(np.log10(xmin)+x*(np.log10(xmax)-np.log10(xmin)))
# - normal scale
else:
try : return [xmin+i*(xmax-xmin) if i is not None else i for i in x]
except: return xmin+x*(xmax-xmin)
# ==================================================================================================
def rel2abs_y(y, axis=None):
r'''
Transform relative y-coordinates to absolute y-coordinates. Relative coordinates correspond to a
fraction of the relevant axis. Be sure to set the limits and scale before calling this function!
:arguments:
**y** (``float``, ``list``)
Relative coordinates.
:options:
**axis** ([``plt.gca()``] | ...)
Specify the axis to which to apply the limits.
:returns:
**y** (``float``, ``list``)
Absolute coordinates.
'''
# get current axis
if axis is None:
axis = plt.gca()
# get current limits
ymin, ymax = axis.get_ylim()
# transform
# - log scale
if axis.get_xscale() == 'log':
try : return [10.**(np.log10(ymin)+i*(np.log10(ymax)-np.log10(ymin))) if i is not None else i for i in y]
except: return 10.**(np.log10(ymin)+y*(np.log10(ymax)-np.log10(ymin)))
# - normal scale
else:
try : return [ymin+i*(ymax-ymin) if i is not None else i for i in y]
except: return ymin+y*(ymax-ymin)
# ==================================================================================================
def subplots(scale_x=None, scale_y=None, scale=None, **kwargs):
    r'''
    Run ``matplotlib.pyplot.subplots`` with ``figsize`` set to the correct multiple of the
    rcParams default.

    :additional options:

        **scale, scale_x, scale_y** (``<float>``)
            Scale the figure-size (along one of the dimensions).
    '''
    # an explicitly supplied figure-size takes precedence: defer entirely to matplotlib
    if 'figsize' in kwargs:
        return plt.subplots(**kwargs)
    # start from the rcParams default and apply the requested scaling
    w, h = mpl.rcParams['figure.figsize']
    if scale is not None:
        w = w * scale
        h = h * scale
    if scale_x is not None:
        w = w * scale_x
    if scale_y is not None:
        h = h * scale_y
    # one default-sized panel per row/column
    n_rows = kwargs.pop('nrows', 1)
    n_cols = kwargs.pop('ncols', 1)
    return plt.subplots(nrows=n_rows, ncols=n_cols, figsize=(n_cols * w, n_rows * h), **kwargs)
# ==================================================================================================
def plot(x, y, units='absolute', axis=None, **kwargs):
    r'''
    Plot, optionally interpreting the coordinates as fractions of the axis.

    :arguments:

        **x, y** (``list``)
            Coordinates.

    :options:

        **units** ([``'absolute'``] | ``'relative'``)
            The type of units in which the coordinates are specified. Relative coordinates
            correspond to a fraction of the relevant axis. If you use relative coordinates,
            be sure to set the limits and scale before calling this function!

        ...
            Any ``plt.plot(...)`` option.

    :returns:

        The handle of the ``plt.plot(...)`` command.
    '''
    ax = axis if axis is not None else plt.gca()
    # relative units: map fractions of the axis to data coordinates first
    if units.lower() == 'relative':
        x = rel2abs_x(x, ax)
        y = rel2abs_y(y, ax)
    return ax.plot(x, y, **kwargs)
# ==================================================================================================
def text(x, y, text, units='absolute', axis=None, **kwargs):
    r'''
    Plot a text, optionally interpreting the coordinates as fractions of the axis.

    :arguments:

        **x, y** (``float``)
            Coordinates.

        **text** (``str``)
            Text to plot.

    :options:

        **units** ([``'absolute'``] | ``'relative'``)
            The type of units in which the coordinates are specified. Relative coordinates
            correspond to a fraction of the relevant axis. If you use relative coordinates,
            be sure to set the limits and scale before calling this function!

        ...
            Any ``plt.text(...)`` option.

    :returns:

        The handle of the ``plt.text(...)`` command.
    '''
    ax = axis if axis is not None else plt.gca()
    # relative units: map fractions of the axis to data coordinates first
    if units.lower() == 'relative':
        x = rel2abs_x(x, ax)
        y = rel2abs_y(y, ax)
    return ax.text(x, y, text, **kwargs)
# ==================================================================================================
def diagonal_powerlaw(exp, ll=None, lr=None, tl=None, tr=None, width=None, height=None, plot=False, **kwargs):
    r'''
    Set the limits such that a power-law with a certain exponent lies on the diagonal.

    :arguments:

        **exp** (``<float>``)
            The power-law exponent.

        **ll, lr, tl, tr** (``<list>``)
            Coordinates of the lower-left, or the lower-right, or the top-left, or the top-right corner.

        **width, height** (``<float>``)
            Width or the height.

    :options:

        **axis** ([``plt.gca()``] | ...)
            Specify the axis to which to apply the limits.

        **plot** ([``False``] | ``True``)
            Plot the diagonal.

        ...
            Any ``plt.plot(...)`` option.

    :returns:

        The handle of the ``plt.plot(...)`` command (if any).
    '''
    axis = kwargs.pop('axis', plt.gca())
    # exactly one of "width"/"height" must be given; convert it to log-space, where the
    # power-law is a straight line and the geometry below is simple addition
    if width and not height: width = np.log(width)
    elif height and not width: height = np.log(height)
    else: raise IOError('Specify "width" or "height"')
    # exactly one corner must be given; convert it to log-space as well
    if ll and not lr and not tl and not tr: ll = [np.log(ll[0]), np.log(ll[1])]
    elif lr and not ll and not tl and not tr: lr = [np.log(lr[0]), np.log(lr[1])]
    elif tl and not lr and not ll and not tr: tl = [np.log(tl[0]), np.log(tl[1])]
    elif tr and not lr and not tl and not ll: tr = [np.log(tr[0]), np.log(tr[1])]
    else: raise IOError('Specify "ll" or "lr" or "tl" or "tr"')
    # the construction only makes sense on a log-log plot
    axis.set_xscale('log')
    axis.set_yscale('log')
    # derive the missing dimension from the slope |exp| in log-space
    if width: height = width * np.abs(exp)
    elif height: width = height / np.abs(exp)
    # set the limits from the given corner; sorted() keeps (min, max) order regardless of sign
    if ll:
        axis.set_xlim(sorted([np.exp(ll[0]), np.exp(ll[0]+width)]))
        axis.set_ylim(sorted([np.exp(ll[1]), np.exp(ll[1]+height)]))
    elif lr:
        axis.set_xlim(sorted([np.exp(lr[0]), np.exp(lr[0]-width)]))
        axis.set_ylim(sorted([np.exp(lr[1]), np.exp(lr[1]+height)]))
    elif tl:
        axis.set_xlim(sorted([np.exp(tl[0]), np.exp(tl[0]+width)]))
        axis.set_ylim(sorted([np.exp(tl[1]), np.exp(tl[1]-height)]))
    elif tr:
        axis.set_xlim(sorted([np.exp(tr[0]), np.exp(tr[0]-width)]))
        axis.set_ylim(sorted([np.exp(tr[1]), np.exp(tr[1]-height)]))
    # optionally draw the diagonal, spanning the full axis in relative coordinates
    if plot:
        if exp > 0: return plot_powerlaw(exp, 0., 0., 1., **kwargs)
        else: return plot_powerlaw(exp, 0., 1., 1., **kwargs)
# ==================================================================================================
def annotate_powerlaw(text, exp, startx, starty, width=None, rx=0.5, ry=0.5, **kwargs):
    r'''
    Added a label to the middle of a power-law annotation (see ``goosempl.plot_powerlaw``).

    :arguments:

        **exp** (``float``)
            The power-law exponent.

        **startx, starty** (``float``)
            Start coordinates.

    :options:

        **width, height, endx, endy** (``float``)
            Definition of the end coordinate (only one of these options is needed).

        **rx, ry** (``float``)
            Shift in x- and y-direction w.r.t. the default coordinates.

        **units** ([``'relative'``] | ``'absolute'``)
            The type of units in which the coordinates are specified. Relative coordinates correspond to a
            fraction of the relevant axis. If you use relative coordinates, be sure to set the limits and
            scale before calling this function!

        **axis** ([``plt.gca()``] | ...)
            Specify the axis to which to apply the limits.

        ...
            Any ``plt.text(...)`` option.

    :returns:

        The handle of the ``plt.text(...)`` command.
    '''
    # get options/defaults
    endx = kwargs.pop('endx', None)
    endy = kwargs.pop('endy', None)
    height = kwargs.pop('height', None)
    units = kwargs.pop('units', 'relative')
    axis = kwargs.pop('axis', plt.gca())
    # check: the geometry below assumes the power-law is a straight line
    if axis.get_xscale() != 'log' or axis.get_yscale() != 'log':
        raise IOError('This function only works on a log-log scale, where the power-law is a straight line')
    # apply width/height: a width fixes endx, a height fixes endy (sign follows the slope);
    # the remaining coordinate is computed from the power-law below
    if width is not None:
        endx = startx + width
        endy = None
    elif height is not None:
        if exp > 0: endy = starty + height
        elif exp == 0: endy = starty
        else: endy = starty - height
        endx = None
    # transform relative (axis-fraction) coordinates to data coordinates
    if units.lower() == 'relative':
        [startx, endx] = rel2abs_x([startx, endx], axis)
        [starty, endy] = rel2abs_y([starty, endy], axis)
    # determine multiplication constant of y = const * x**exp through the start point
    const = starty / (startx**exp)
    # get end x/y-coordinate: whichever was not fixed above
    if endx is not None: endy = const * endx**exp
    else: endx = (endy / const)**(1/exp)
    # middle of the annotation in log-space, shifted by the fractions rx/ry
    x = 10. ** (np.log10(startx) + rx * (np.log10(endx) - np.log10(startx)))
    y = 10. ** (np.log10(starty) + ry * (np.log10(endy) - np.log10(starty)))
    # plot
    return axis.text(x, y, text, **kwargs)
# ==================================================================================================
def plot_powerlaw(exp, startx, starty, width=None, **kwargs):
    r'''
    Plot a power-law.

    :arguments:

        **exp** (``float``)
            The power-law exponent.

        **startx, starty** (``float``)
            Start coordinates.

    :options:

        **width, height, endx, endy** (``float``)
            Definition of the end coordinate (only one of these options is needed).

        **units** ([``'relative'``] | ``'absolute'``)
            The type of units in which the coordinates are specified. Relative coordinates correspond to a
            fraction of the relevant axis. If you use relative coordinates, be sure to set the limits and
            scale before calling this function!

        **axis** ([``plt.gca()``] | ...)
            Specify the axis to which to apply the limits.

        ...
            Any ``plt.plot(...)`` option.

    :returns:

        The handle of the ``plt.plot(...)`` command.
    '''
    # get options/defaults
    endx = kwargs.pop('endx', None)
    endy = kwargs.pop('endy', None)
    height = kwargs.pop('height', None)
    units = kwargs.pop('units', 'relative')
    axis = kwargs.pop('axis', plt.gca())
    # check: a power-law is only a straight line on a log-log plot
    if axis.get_xscale() != 'log' or axis.get_yscale() != 'log':
        raise IOError('This function only works on a log-log scale, where the power-law is a straight line')
    # apply width/height: a width fixes endx, a height fixes endy (sign follows the slope);
    # the remaining coordinate is computed from the power-law below
    if width is not None:
        endx = startx + width
        endy = None
    elif height is not None:
        if exp > 0: endy = starty + height
        elif exp == 0: endy = starty
        else: endy = starty - height
        endx = None
    # transform relative (axis-fraction) coordinates to data coordinates
    if units.lower() == 'relative':
        [startx, endx] = rel2abs_x([startx, endx], axis)
        [starty, endy] = rel2abs_y([starty, endy], axis)
    # determine multiplication constant of y = const * x**exp through the start point
    const = starty / (startx**exp)
    # get end x/y-coordinate: whichever was not fixed above
    if endx is not None: endy = const * endx**exp
    else: endx = (endy / const)**(1/exp)
    # plot the straight segment between start and end
    return axis.plot([startx, endx], [starty, endy], **kwargs)
# ==================================================================================================
def grid_powerlaw(exp, insert=0, skip=0, end=-1, step=0, axis=None, **kwargs):
    r'''
    Draw a power-law grid: a grid that respects a certain power-law exponent. The grid-lines start from
    the positions of the ticks.

    :arguments:

        **exp** (``float``)
            The power-law exponent.

    :options:

        **insert** (``<int>``)
            Insert extra lines in between the default lines set by the tick positions.

        **skip, end, step** (``<int>``)
            Select from the lines based on ``coor = coor[skip:end:step]``.

        **axis** ([``plt.gca()``] | ...)
            Specify the axis to which to apply the limits.

        ...
            Any ``plt.plot(...)`` option.

    :returns:

        The handle of the ``plt.plot(...)`` command.
    '''
    # default axis
    if axis is None: axis = plt.gca()
    # default plot settings: thin dashed black lines
    kwargs.setdefault('color', 'k')
    kwargs.setdefault('linestyle', '--')
    kwargs.setdefault('linewidth', 1)
    # check: the relative-coordinate construction below assumes straight power-laws
    if axis.get_xscale() != 'log' or axis.get_yscale() != 'log':
        raise IOError('This function only works on a log-log scale, where the power-law is a straight line')
    # all work below is done in relative (axis-fraction) coordinates, converted back at the end
    # zero-exponent: draw horizontal lines
    if exp == 0:
        # y-coordinate of the start positions, taken from the tick positions
        starty = abs2rel_y(axis.get_yticks(), axis=axis)
        # insert extra coordinates: linear interpolation between the tick positions
        if insert > 0:
            n = len(starty)
            x = np.linspace(0,1,n+(n-1)*int(insert))
            xp = np.linspace(0,1,n)
            starty = np.interp(x, xp, starty)
        # skip coordinates
        starty = starty[int(skip):int(end):int(1+step)]
        # set remaining coordinates: horizontal lines spanning the full axis
        endy = starty
        startx = np.zeros((len(starty)))
        endx = np.ones ((len(starty)))
    # all other exponents
    else:
        # get the axis' size in real coordinates
        # - get the limits
        xmin, xmax = axis.get_xlim()
        ymin, ymax = axis.get_ylim()
        # - compute the size in both directions (in decades)
        deltax = np.log10(xmax) - np.log10(xmin)
        deltay = np.log10(ymax) - np.log10(ymin)
        # convert the exponent in real coordinates to an exponent in relative coordinates
        b = np.abs(exp) * deltax / deltay
        # x-coordinate of the start positions, taken from the tick positions
        startx = abs2rel_x(axis.get_xticks(), axis=axis)
        # compute how many labels need to be prepended so lines entering from the left
        # edge still span the full height (each line covers 1/b in relative x)
        Dx = startx[1] - startx[0]
        nneg = int(np.floor(1./(b*Dx))) - 1
        # add extra to be sure
        if insert > 0:
            nneg += 1
        # prepend start positions to the left of the axis
        if nneg > 0:
            startx = np.hstack(( startx[0]+np.cumsum(-Dx * np.ones((nneg)))[::-1], startx ))
        # insert extra coordinates: linear interpolation between the tick positions
        if insert > 0:
            n = len(startx)
            x = np.linspace(0,1,n+(n-1)*int(insert))
            xp = np.linspace(0,1,n)
            startx = np.interp(x, xp, startx)
        # skip coordinates
        if step > 0:
            startx = startx[int(skip)::int(1+step)]
        # x-coordinate of the end of the lines: one full axis-height further along the slope
        endx = startx + 1/b
        # y-coordinate of the start and the end of the lines: bottom-to-top for a positive
        # exponent, top-to-bottom for a negative one
        if exp > 0:
            starty = np.zeros((len(startx)))
            endy = np.ones ((len(startx)))
        else:
            starty = np.ones ((len(startx)))
            endy = np.zeros((len(startx)))
    # convert to real coordinates
    startx = rel2abs_x(startx, axis)
    endx = rel2abs_x(endx , axis)
    starty = rel2abs_y(starty, axis)
    endy = rel2abs_y(endy , axis)
    # plot: one call, each column of the stacked arrays is one line
    lines = axis.plot(np.vstack(( startx, endx )), np.vstack(( starty, endy )), **kwargs)
    # remove access in labels: keep only the first line in the legend
    plt.setp(lines[1:], label="_")
    # return handles
    return lines
# ==================================================================================================
def histogram_bin_edges_minwidth(min_width, bins):
    r'''
    Merge bins with their right-neighbour until each bin has a minimum width.

    :arguments:

        **bins** (``<array_like>``)
            The bin-edges.

        **min_width** (``<float>``)
            The minimum bin width.
    '''
    # nothing requested: return the input untouched
    if min_width is None or min_width is False:
        return bins
    # repeatedly merge the left-most bin that is too narrow
    while True:
        narrow = np.where(np.diff(bins) < min_width)[0]
        if narrow.size == 0:
            return bins
        i = narrow[0]
        if i + 1 == len(bins) - 1:
            # the narrow bin is the last one: merge it into its left neighbour
            bins = np.hstack((bins[:i], bins[-1]))
        else:
            # otherwise drop the shared edge with the right neighbour
            bins = np.hstack((bins[:i + 1], bins[(i + 2):]))
# ==================================================================================================
def histogram_bin_edges_mincount(data, min_count, bins):
    r'''
    Merge bins with their right-neighbour until each bin has a minimum number of data-points.

    :arguments:

        **data** (``<array_like>``)
            Input data. The histogram is computed over the flattened array.

        **bins** (``<array_like>`` | ``<int>``)
            The bin-edges (or the number of bins, automatically converted to equal-sized bins).

        **min_count** (``<int>``)
            The minimum number of data-points per bin.
    '''
    # nothing requested: return the input untouched
    if min_count is None or min_count is False:
        return bins
    # check (exact type-check kept on purpose: it also rejects booleans)
    if type(min_count) != int:
        raise IOError('"min_count" must be an integer number')
    # repeatedly merge the left-most bin that is too sparse
    while True:
        counts, _ = np.histogram(data, bins=bins, density=False)
        sparse = np.where(counts < min_count)[0]
        if sparse.size == 0:
            return bins
        i = sparse[0]
        if i + 1 == len(counts):
            # the sparse bin is the last one: merge it into its left neighbour
            bins = np.hstack((bins[:i], bins[-1]))
        else:
            # otherwise drop the shared edge with the right neighbour
            bins = np.hstack((bins[:i + 1], bins[(i + 2):]))
# ==================================================================================================
# ==================================================================================================
def histogram(data, return_edges=True, **kwargs):
    r'''
    Compute a histogram.

    See `numpy.histogram <https://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html>`_

    :extra options:

        **return_edges** ([``True``] | [``False``])
            Return the bin edges if set to ``True``, return their midpoints otherwise.
    '''
    counts, edges = np.histogram(data, **kwargs)
    if not return_edges:
        # replace the edges by the midpoint of each bin
        return counts, edges[:-1] + np.diff(edges) / 2.
    return counts, edges
# ==================================================================================================
def histogram_cumulative(data, **kwargs):
    r'''
    Compute a cumulative histogram.

    See `numpy.histogram <https://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html>`_

    :extra options:

        **return_edges** ([``True``] | [``False``])
            Return the bin edges if set to ``True``, return their midpoints otherwise.

        **normalize** ([``False``] | ``True``)
            Normalize such that the final probability is one. In this case the function returns the
            (binned) cumulative probability density.
    '''
    # options not understood by numpy.histogram
    edges_out = kwargs.pop('return_edges', True)
    normalize = kwargs.pop('normalize', False)
    counts, edges = np.histogram(data, **kwargs)
    cum = np.cumsum(counts)
    # normalize so the last value equals one
    if normalize:
        cum = cum / cum[-1]
    # replace the edges by the midpoint of each bin
    if not edges_out:
        edges = edges[:-1] + np.diff(edges) / 2.
    return cum, edges
# ==================================================================================================
def hist(P, edges, **kwargs):
    r'''
    Plot a histogram: one rectangular patch per bin, grouped in a PatchCollection.

    ``P`` are the bin counts/probabilities, ``edges`` the bin edges (one longer than ``P``).
    Extra options: ``axis`` (default ``plt.gca()``), ``cindex`` (per-patch color values),
    ``autoscale`` (manually update the axis limits); remaining options are passed to the
    PatchCollection.
    '''
    from matplotlib.collections import PatchCollection
    from matplotlib.patches import Polygon
    # extract local options
    axis = kwargs.pop('axis', plt.gca())
    cindex = kwargs.pop('cindex', None)
    autoscale = kwargs.pop('autoscale', True)
    # set defaults
    kwargs.setdefault('edgecolor', 'k')
    # no color-index: draw fully transparent faces
    if cindex is None:
        kwargs.setdefault('facecolor', (0., 0., 0., 0.))
    # one rectangle (as a Polygon) per bin
    polygons = [
        Polygon(np.array([[xl, 0.], [xu, 0.], [xu, p], [xl, p]]))
        for p, xl, xu in zip(P, edges[:-1], edges[1:])
    ]
    # convert patches -> matplotlib-objects and add them to the axis
    collection = PatchCollection(polygons, **kwargs)
    if cindex is not None:
        collection.set_array(cindex)
    axis.add_collection(collection)
    # rescale the axes manually (automatic limits of Collections are not supported)
    if autoscale:
        xlim = [edges[0], edges[-1]]
        ylim = [0, np.max(P)]
        # set limits +/- 10% extra margin
        dx = xlim[1] - xlim[0]
        dy = ylim[1] - ylim[0]
        axis.set_xlim([xlim[0] - .1 * dx, xlim[1] + .1 * dx])
        axis.set_ylim([ylim[0] - .1 * dy, ylim[1] + .1 * dy])
    return collection
# ==================================================================================================
def cdf(data, mode='continuous', **kwargs):
    '''
    Return the empirical cumulative density of ``data``.

    :arguments:

        **data** (``<numpy.ndarray>``)
            Input data, to plot the distribution for.

    :returns:

        **P** (``<numpy.ndarray>``)
            Cumulative probability (linearly spaced on [0, 1]).

        **x** (``<numpy.ndarray>``)
            Data points, sorted ascending.
    '''
    # NOTE: "mode" and extra keyword options are accepted but currently unused
    x = np.sort(data)
    P = np.linspace(0.0, 1.0, len(data))
    return (P, x)
# ==================================================================================================
def patch(*args, **kwargs):
    '''
    Add patches to a plot. The color of the patches is indexed according to a specified color-index.

    :example:

        Plot a finite element mesh: the outline of the undeformed configuration, and the deformed
        configuration for which the elements get a color e.g. based on stress::

            import matplotlib.pyplot as plt
            import goosempl as gplt

            fig,ax = plt.subplots()
            p = gplt.patch(coor=coor+disp,conn=conn,axis=ax,cindex=stress,cmap='YlOrRd',edgecolor=None)
            _ = gplt.patch(coor=coor    ,conn=conn,axis=ax)
            cbar = fig.colorbar(p,axis=ax,aspect=10)
            plt.show()

    :arguments:

        **coor** (``<numpy.ndarray>`` | ``<list>`` (nested))
            Matrix with on each row the coordinates (positions) of each node.

        **conn** (``<numpy.ndarray>`` | ``<list>`` (nested))
            Matrix with on each row the node numbers (rows in ``coor``) which form an element (patch).

    :options:

        **cindex** (``<numpy.ndarray>``)
            Array with, for each patch, the value that should be indexed to a color.

        **axis** (``<matplotlib>``)
            Specify an axis to include to plot in. By default the current axis is used.

        **autoscale** ([``True``] | ``False``)
            Automatically update the limits of the plot (currently automatic limits of Collections are
            not supported by matplotlib).

    :recommended options:

        **cmap**, **linewidth**, **edgecolor**, **clim**
            Any ``matplotlib.collections.PatchCollection`` option.

    :returns:

        **handle** (``<matplotlib>``)
            Handle of the patch objects.

    .. seealso::

        * `matplotlib example
          <http://matplotlib.org/examples/api/patch_collection.html>`_.
    '''
    from matplotlib.collections import PatchCollection
    from matplotlib.patches import Polygon
    # check dependent options
    if ('coor' not in kwargs or 'conn' not in kwargs):
        raise IOError('Specify both "coor" and "conn"')
    # extract local options
    axis = kwargs.pop('axis', plt.gca())
    cindex = kwargs.pop('cindex', None)
    coor = kwargs.pop('coor', None)
    conn = kwargs.pop('conn', None)
    autoscale = kwargs.pop('autoscale', True)
    # set defaults
    kwargs.setdefault('edgecolor', 'k')
    # no color-index -> set transparent
    if cindex is None:
        kwargs.setdefault('facecolor', (0., 0., 0., 0.))
    # convert mesh -> list of Polygons
    if coor is not None and conn is not None:
        poly = [Polygon(coor[iconn, :]) for iconn in conn]
        # BUG FIX: was ``args = tuple(poly, *args)``, which always raised
        # "TypeError: tuple expected at most 1 argument" -- PatchCollection
        # expects the sequence of patches itself
        args = tuple(poly)
    # convert patches -> matplotlib-objects
    p = PatchCollection(args, **kwargs)
    # add colors to patches
    if cindex is not None:
        p.set_array(cindex)
    # add patches to axis
    axis.add_collection(p)
    # rescale the axes manually
    if autoscale:
        # - get limits
        xlim = [np.min(coor[:, 0]), np.max(coor[:, 0])]
        ylim = [np.min(coor[:, 1]), np.max(coor[:, 1])]
        # - set limits +/- 10% extra margin
        axis.set_xlim([xlim[0] - .1 * (xlim[1] - xlim[0]), xlim[1] + .1 * (xlim[1] - xlim[0])])
        axis.set_ylim([ylim[0] - .1 * (ylim[1] - ylim[0]), ylim[1] + .1 * (ylim[1] - ylim[0])])
    return p
# ==================================================================================================
|
tdegeus/GooseMPL | GooseMPL/__init__.py | histogram | python | def histogram(data, return_edges=True, **kwargs):
r'''
Compute histogram.
See `numpy.histrogram <https://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html>`_
:extra options:
**return_edges** ([``True``] | [``False``])
Return the bin edges if set to ``True``, return their midpoints otherwise.
'''
# use NumPy's default function to compute the histogram
P, bin_edges = np.histogram(data, **kwargs)
# return default output
if return_edges: return P, bin_edges
# convert bin_edges -> mid-points of each bin
x = np.diff(bin_edges) / 2. + bin_edges[:-1]
# return with bin mid-points
return P, x | r'''
Compute histogram.
See `numpy.histrogram <https://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html>`_
:extra options:
**return_edges** ([``True``] | [``False``])
Return the bin edges if set to ``True``, return their midpoints otherwise. | train | https://github.com/tdegeus/GooseMPL/blob/16e1e06cbcf7131ac98c03ca7251ce83734ef905/GooseMPL/__init__.py#L1024-L1045 | null | '''
This module provides some extensions to matplotlib.
:dependencies:
* numpy
* matplotlib
:copyright:
| Tom de Geus
| tom@geus.me
| http://www.geus.me
'''
# ==================================================================================================
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
import os,re,sys
# ==================================================================================================
def find_latex_font_serif():
    r'''
    Find an available font to mimic LaTeX, and return its name (or ``None`` if no
    candidate font is installed).
    '''
    import os, re
    import matplotlib.font_manager
    # strip a font path down to its bare family name
    def stem(path):
        return os.path.splitext(os.path.split(path)[-1])[0].split(' - ')[0]
    available = matplotlib.font_manager.findSystemFonts(fontpaths=None, fontext='ttf')
    # candidate patterns, in order of preference
    preference = [
        r'.*Computer\ Modern\ Roman.*',
        r'.*CMU\ Serif.*',
        r'.*CMU.*',
        r'.*Times.*',
        r'.*DejaVu.*',
        r'.*Serif.*',
    ]
    for pattern in preference:
        for path in available:
            if re.match(pattern, path):
                return stem(path)
    return None
# --------------------------------------------------------------------------------------------------
def copy_style():
    r'''
    Write all goose-styles to the relevant matplotlib configuration directory.
    '''
    import os
    import matplotlib

    # style definitions
    # -----------------

    styles = {}

    styles['goose.mplstyle'] = '''
figure.figsize : 8,6
font.weight : normal
font.size : 16
axes.labelsize : medium
axes.titlesize : medium
xtick.labelsize : small
ytick.labelsize : small
xtick.top : True
ytick.right : True
axes.facecolor : none
axes.prop_cycle : cycler('color',['k', 'r', 'g', 'b', 'y', 'c', 'm'])
legend.fontsize : medium
legend.fancybox : true
legend.columnspacing : 1.0
legend.handletextpad : 0.2
lines.linewidth : 2
image.cmap : afmhot
image.interpolation : nearest
image.origin : lower
savefig.facecolor : none
figure.autolayout : True
errorbar.capsize : 2
'''

    styles['goose-tick-in.mplstyle'] = '''
xtick.direction : in
ytick.direction : in
'''

    styles['goose-tick-lower.mplstyle'] = '''
xtick.top : False
ytick.right : False
axes.spines.top : False
axes.spines.right : False
'''

    # the {{...}} below are escaped braces for str.format: they become {...} in the output
    if find_latex_font_serif() is not None:
        styles['goose-latex.mplstyle'] = r'''
font.family : serif
font.serif : {serif:s}
font.weight : bold
font.size : 18
text.usetex : true
text.latex.preamble : \usepackage{{amsmath}},\usepackage{{amsfonts}},\usepackage{{amssymb}},\usepackage{{bm}}
'''.format(serif=find_latex_font_serif())
    else:
        # BUG FIX: this fallback string is never passed through .format(), so the braces
        # must NOT be doubled (previously '{{amsmath}}' leaked literally into the file)
        styles['goose-latex.mplstyle'] = r'''
font.family : serif
font.weight : bold
font.size : 18
text.usetex : true
text.latex.preamble : \usepackage{amsmath},\usepackage{amsfonts},\usepackage{amssymb},\usepackage{bm}
'''

    # write style definitions
    # -----------------------

    # directory name where the styles are stored
    dirname = os.path.abspath(os.path.join(matplotlib.get_configdir(), 'stylelib'))

    # make directory if it does not yet exist
    if not os.path.isdir(dirname): os.makedirs(dirname)

    # write all styles
    for fname, style in styles.items():
        open(os.path.join(dirname, fname), 'w').write(style)
# ==================================================================================================
def set_decade_lims(axis=None, direction=None):
    r'''
    Set limits to the floor/ceil values in terms of decades.

    :options:

        **axis** ([``plt.gca()``] | ...)
            Specify the axis to which to apply the limits.

        **direction** ([``None``] | ``'x'`` | ``'y'``)
            Limit the application to a certain direction (default: both).
    '''
    if axis is None:
        axis = plt.gca()
    # round a (lo, hi) pair outward to full decades
    def expand(lo, hi):
        return [10. ** np.floor(np.log10(lo)), 10. ** np.ceil(np.log10(hi))]
    if direction is None or direction == 'x':
        lo, hi = axis.get_xlim()
        axis.set_xlim(expand(lo, hi))
    if direction is None or direction == 'y':
        lo, hi = axis.get_ylim()
        axis.set_ylim(expand(lo, hi))
# ==================================================================================================
def scale_lim(lim, factor=1.05):
    r'''
    Scale limits to be 5% wider, to have a nice plot.

    :arguments:

        **lim** (``<list>`` | ``<str>``)
            The limits. May be a string "[...,...]", which is converted to a list.

    :options:

        **factor** ([``1.05``] | ``<float>``)
            Scale factor.
    '''
    # convert string "[...,...]"
    # NOTE: eval of caller-supplied text; only pass trusted input
    if type(lim) == str:
        lim = eval(lim)
    # pad both ends symmetrically (in place)
    pad = (factor - 1.) / 2. * (lim[1] - lim[0])
    lim[0] -= pad
    lim[1] += pad
    return lim
# ==================================================================================================
def abs2rel_x(x, axis=None):
    r'''
    Transform absolute x-coordinates to relative x-coordinates. Relative coordinates correspond to a
    fraction of the relevant axis. Be sure to set the limits and scale before calling this function!

    :arguments:

        **x** (``float``, ``list``)
            Absolute coordinates.

    :options:

        **axis** ([``plt.gca()``] | ...)
            Specify the axis to which to apply the limits.

    :returns:

        **x** (``float``, ``list``)
            Relative coordinates.
    '''
    # get current axis
    if axis is None:
        axis = plt.gca()
    # get current limits
    xmin, xmax = axis.get_xlim()
    # transform
    # - log scale: interpolate in log10-space
    if axis.get_xscale() == 'log':
        # a scalar is not iterable and raises TypeError: fall through to the scalar branch
        # (narrowed from a bare ``except`` so real errors are not silently swallowed)
        try:
            return [(np.log10(i)-np.log10(xmin))/(np.log10(xmax)-np.log10(xmin)) if i is not None else i for i in x]
        except TypeError:
            return (np.log10(x)-np.log10(xmin))/(np.log10(xmax)-np.log10(xmin))
    # - normal scale: plain linear interpolation
    else:
        try:
            return [(i-xmin)/(xmax-xmin) if i is not None else i for i in x]
        except TypeError:
            return (x-xmin)/(xmax-xmin)
# ==================================================================================================
def abs2rel_y(y, axis=None):
    r'''
    Transform absolute y-coordinates to relative y-coordinates. Relative coordinates correspond to a
    fraction of the relevant axis. Be sure to set the limits and scale before calling this function!

    :arguments:

        **y** (``float``, ``list``)
            Absolute coordinates.

    :options:

        **axis** ([``plt.gca()``] | ...)
            Specify the axis to which to apply the limits.

    :returns:

        **y** (``float``, ``list``)
            Relative coordinates.
    '''
    # get current axis
    if axis is None:
        axis = plt.gca()
    # get current limits
    ymin, ymax = axis.get_ylim()
    # transform
    # - log scale: interpolate in log10-space
    # BUG FIX: this checked ``get_xscale()`` (the x-axis) when transforming y-coordinates
    if axis.get_yscale() == 'log':
        # a scalar is not iterable and raises TypeError: fall through to the scalar branch
        try:
            return [(np.log10(i)-np.log10(ymin))/(np.log10(ymax)-np.log10(ymin)) if i is not None else i for i in y]
        except TypeError:
            return (np.log10(y)-np.log10(ymin))/(np.log10(ymax)-np.log10(ymin))
    # - normal scale: plain linear interpolation
    else:
        try:
            return [(i-ymin)/(ymax-ymin) if i is not None else i for i in y]
        except TypeError:
            return (y-ymin)/(ymax-ymin)
# ==================================================================================================
def rel2abs_x(x, axis=None):
    r'''
    Transform relative x-coordinates to absolute x-coordinates. Relative coordinates correspond to a
    fraction of the relevant axis. Be sure to set the limits and scale before calling this function!

    :arguments:

        **x** (``float``, ``list``)
            Relative coordinates.

    :options:

        **axis** ([``plt.gca()``] | ...)
            Specify the axis to which to apply the limits.

    :returns:

        **x** (``float``, ``list``)
            Absolute coordinates.
    '''
    # get current axis
    if axis is None:
        axis = plt.gca()
    # get current limits
    xmin, xmax = axis.get_xlim()
    # transform
    # - log scale: interpolate in log10-space, then map back
    if axis.get_xscale() == 'log':
        # a scalar is not iterable and raises TypeError: fall through to the scalar branch
        # (narrowed from a bare ``except`` so real errors are not silently swallowed)
        try:
            return [10.**(np.log10(xmin)+i*(np.log10(xmax)-np.log10(xmin))) if i is not None else i for i in x]
        except TypeError:
            return 10.**(np.log10(xmin)+x*(np.log10(xmax)-np.log10(xmin)))
    # - normal scale: plain linear interpolation
    else:
        try:
            return [xmin+i*(xmax-xmin) if i is not None else i for i in x]
        except TypeError:
            return xmin+x*(xmax-xmin)
# ==================================================================================================
def rel2abs_y(y, axis=None):
    r'''
    Transform relative y-coordinates to absolute y-coordinates. Relative coordinates correspond to a
    fraction of the relevant axis. Be sure to set the limits and scale before calling this function!

    :arguments:

        **y** (``float``, ``list``)
            Relative coordinates.

    :options:

        **axis** ([``plt.gca()``] | ...)
            Specify the axis to which to apply the limits.

    :returns:

        **y** (``float``, ``list``)
            Absolute coordinates.
    '''
    # get current axis
    if axis is None:
        axis = plt.gca()
    # get current limits
    ymin, ymax = axis.get_ylim()
    # transform
    # - log scale: interpolate in log10-space, then map back
    # BUG FIX: this checked ``get_xscale()`` (the x-axis) when transforming y-coordinates
    if axis.get_yscale() == 'log':
        # a scalar is not iterable and raises TypeError: fall through to the scalar branch
        try:
            return [10.**(np.log10(ymin)+i*(np.log10(ymax)-np.log10(ymin))) if i is not None else i for i in y]
        except TypeError:
            return 10.**(np.log10(ymin)+y*(np.log10(ymax)-np.log10(ymin)))
    # - normal scale: plain linear interpolation
    else:
        try:
            return [ymin+i*(ymax-ymin) if i is not None else i for i in y]
        except TypeError:
            return ymin+y*(ymax-ymin)
# ==================================================================================================
def subplots(scale_x=None, scale_y=None, scale=None, **kwargs):
    r'''
    Run ``matplotlib.pyplot.subplots`` with ``figsize`` set to the correct multiple of the
    rcParams default.

    :additional options:

        **scale, scale_x, scale_y** (``<float>``)
            Scale the figure-size (along one of the dimensions).
    '''
    # an explicitly supplied figure-size takes precedence: defer entirely to matplotlib
    if 'figsize' in kwargs:
        return plt.subplots(**kwargs)
    # start from the rcParams default and apply the requested scaling
    w, h = mpl.rcParams['figure.figsize']
    if scale is not None:
        w = w * scale
        h = h * scale
    if scale_x is not None:
        w = w * scale_x
    if scale_y is not None:
        h = h * scale_y
    # one default-sized panel per row/column
    n_rows = kwargs.pop('nrows', 1)
    n_cols = kwargs.pop('ncols', 1)
    return plt.subplots(nrows=n_rows, ncols=n_cols, figsize=(n_cols * w, n_rows * h), **kwargs)
# ==================================================================================================
def plot(x, y, units='absolute', axis=None, **kwargs):
    r'''
    Plot, optionally interpreting the coordinates as fractions of the axis.

    :arguments:

        **x, y** (``list``)
            Coordinates.

    :options:

        **units** ([``'absolute'``] | ``'relative'``)
            The type of units in which the coordinates are specified. Relative coordinates
            correspond to a fraction of the relevant axis. If you use relative coordinates,
            be sure to set the limits and scale before calling this function!

        ...
            Any ``plt.plot(...)`` option.

    :returns:

        The handle of the ``plt.plot(...)`` command.
    '''
    ax = axis if axis is not None else plt.gca()
    # relative units: map fractions of the axis to data coordinates first
    if units.lower() == 'relative':
        x = rel2abs_x(x, ax)
        y = rel2abs_y(y, ax)
    return ax.plot(x, y, **kwargs)
# ==================================================================================================
def text(x, y, text, units='absolute', axis=None, **kwargs):
    r'''
    Plot a text, optionally interpreting the coordinates as fractions of the axis.

    :arguments:

        **x, y** (``float``)
            Coordinates.

        **text** (``str``)
            Text to plot.

    :options:

        **units** ([``'absolute'``] | ``'relative'``)
            The type of units in which the coordinates are specified. Relative coordinates
            correspond to a fraction of the relevant axis. If you use relative coordinates,
            be sure to set the limits and scale before calling this function!

        ...
            Any ``plt.text(...)`` option.

    :returns:

        The handle of the ``plt.text(...)`` command.
    '''
    ax = axis if axis is not None else plt.gca()
    # relative units: map fractions of the axis to data coordinates first
    if units.lower() == 'relative':
        x = rel2abs_x(x, ax)
        y = rel2abs_y(y, ax)
    return ax.text(x, y, text, **kwargs)
# ==================================================================================================
def diagonal_powerlaw(exp, ll=None, lr=None, tl=None, tr=None, width=None, height=None, plot=False, **kwargs):
    r'''
    Set the limits such that a power-law with a certain exponent lies on the diagonal.

    :arguments:

        **exp** (``<float>``)
            The power-law exponent.

        **ll, lr, tl, tr** (``<list>``)
            Coordinates of the lower-left, or the lower-right, or the top-left, or the top-right
            corner. Exactly one of the four must be given.

        **width, height** (``<float>``)
            Width or the height (exactly one of the two must be given); the other is derived from
            the exponent so the power-law spans the diagonal.

    :options:

        **axis** ([``plt.gca()``] | ...)
            Specify the axis to which to apply the limits.

        **plot** ([``False``] | ``True``)
            Plot the diagonal.

        ...
            Any ``plt.plot(...)`` option.

    :returns:

        The handle of the ``plt.plot(...)`` command (if any).
    '''
    axis = kwargs.pop('axis', plt.gca())
    # work in log-space: on a log-log plot a power-law is a straight line, so the corner
    # coordinates and the size are converted with np.log here and back with np.exp below
    if width and not height: width = np.log(width )
    elif height and not width : height = np.log(height)
    else: raise IOError('Specify "width" or "height"')
    if ll and not lr and not tl and not tr: ll = [ np.log(ll[0]), np.log(ll[1]) ]
    elif lr and not ll and not tl and not tr: lr = [ np.log(lr[0]), np.log(lr[1]) ]
    elif tl and not lr and not ll and not tr: tl = [ np.log(tl[0]), np.log(tl[1]) ]
    elif tr and not lr and not tl and not ll: tr = [ np.log(tr[0]), np.log(tr[1]) ]
    else: raise IOError('Specify "ll" or "lr" or "tl" or "tr"')
    axis.set_xscale('log')
    axis.set_yscale('log')
    # derive the missing dimension from the exponent: |slope| = height / width in log-space
    if width : height = width * np.abs(exp)
    elif height: width = height / np.abs(exp)
    # set the limits starting from the specified corner; the sign on width/height encodes the
    # direction in which the box extends from that corner
    if ll:
        axis.set_xlim(sorted([np.exp(ll[0]), np.exp(ll[0]+width )]))
        axis.set_ylim(sorted([np.exp(ll[1]), np.exp(ll[1]+height)]))
    elif lr:
        axis.set_xlim(sorted([np.exp(lr[0]), np.exp(lr[0]-width )]))
        axis.set_ylim(sorted([np.exp(lr[1]), np.exp(lr[1]+height)]))
    elif tl:
        axis.set_xlim(sorted([np.exp(tl[0]), np.exp(tl[0]+width )]))
        axis.set_ylim(sorted([np.exp(tl[1]), np.exp(tl[1]-height)]))
    elif tr:
        axis.set_xlim(sorted([np.exp(tr[0]), np.exp(tr[0]-width )]))
        axis.set_ylim(sorted([np.exp(tr[1]), np.exp(tr[1]-height)]))
    # optionally draw the diagonal itself (in relative coordinates spanning the whole axis)
    if plot:
        if exp > 0: return plot_powerlaw(exp, 0., 0., 1., **kwargs)
        else : return plot_powerlaw(exp, 0., 1., 1., **kwargs)
# ==================================================================================================
def annotate_powerlaw(text, exp, startx, starty, width=None, rx=0.5, ry=0.5, **kwargs):
    r'''
    Added a label to the middle of a power-law annotation (see ``goosempl.plot_powerlaw``).

    :arguments:

        **exp** (``float``)
            The power-law exponent.

        **startx, starty** (``float``)
            Start coordinates.

    :options:

        **width, height, endx, endy** (``float``)
            Definition of the end coordinate (only one of these options is needed).

        **rx, ry** (``float``)
            Relative position of the label between start and end (0 = start, 1 = end), interpolated
            in log-space.

        **units** ([``'relative'``] | ``'absolute'``)
            The type of units in which the coordinates are specified. Relative coordinates correspond to a
            fraction of the relevant axis. If you use relative coordinates, be sure to set the limits and
            scale before calling this function!

        **axis** ([``plt.gca()``] | ...)
            Specify the axis to which to apply the limits.

        ...
            Any ``plt.text(...)`` option.

    :returns:

        The handle of the ``plt.text(...)`` command.
    '''
    # get options/defaults
    endx = kwargs.pop('endx' , None )
    endy = kwargs.pop('endy' , None )
    height = kwargs.pop('height', None )
    units = kwargs.pop('units' , 'relative')
    axis = kwargs.pop('axis' , plt.gca() )
    # check
    if axis.get_xscale() != 'log' or axis.get_yscale() != 'log':
        raise IOError('This function only works on a log-log scale, where the power-law is a straight line')
    # apply width/height
    # NOTE: width/height are interpreted in the same units as the coordinates ("relative" by default)
    if width is not None:
        endx = startx + width
        endy = None
    elif height is not None:
        # the sign of the exponent decides whether the line goes up or down from the start point
        if exp > 0: endy = starty + height
        elif exp == 0: endy = starty
        else : endy = starty - height
        endx = None
    # transform
    # None entries survive the conversion (the still-unknown end coordinate is filled in below)
    if units.lower() == 'relative':
        [startx, endx] = rel2abs_x([startx, endx], axis)
        [starty, endy] = rel2abs_y([starty, endy], axis)
    # determine multiplication constant
    # the power-law through the start point: y = const * x ** exp
    const = starty / ( startx**exp )
    # get end x/y-coordinate
    if endx is not None: endy = const * endx**exp
    else : endx = ( endy / const )**( 1/exp )
    # middle
    # interpolate between start and end in log-space so the label sits "rx/ry along the line"
    x = 10. ** ( np.log10(startx) + rx * ( np.log10(endx) - np.log10(startx) ) )
    y = 10. ** ( np.log10(starty) + ry * ( np.log10(endy) - np.log10(starty) ) )
    # plot
    return axis.text(x, y, text, **kwargs)
# ==================================================================================================
def plot_powerlaw(exp, startx, starty, width=None, **kwargs):
    r'''
    Plot a power-law.

    :arguments:

        **exp** (``float``)
            The power-law exponent.

        **startx, starty** (``float``)
            Start coordinates.

    :options:

        **width, height, endx, endy** (``float``)
            Definition of the end coordinate (only one of these options is needed).

        **units** ([``'relative'``] | ``'absolute'``)
            The type of units in which the coordinates are specified. Relative coordinates correspond to a
            fraction of the relevant axis. If you use relative coordinates, be sure to set the limits and
            scale before calling this function!

        **axis** ([``plt.gca()``] | ...)
            Specify the axis to which to apply the limits.

        ...
            Any ``plt.plot(...)`` option.

    :returns:

        The handle of the ``plt.plot(...)`` command.
    '''
    # get options/defaults
    endx = kwargs.pop('endx' , None )
    endy = kwargs.pop('endy' , None )
    height = kwargs.pop('height', None )
    units = kwargs.pop('units' , 'relative')
    axis = kwargs.pop('axis' , plt.gca() )
    # check
    if axis.get_xscale() != 'log' or axis.get_yscale() != 'log':
        raise IOError('This function only works on a log-log scale, where the power-law is a straight line')
    # apply width/height
    # NOTE: width/height are interpreted in the same units as the coordinates ("relative" by default)
    if width is not None:
        endx = startx + width
        endy = None
    elif height is not None:
        # the sign of the exponent decides whether the line goes up or down from the start point
        if exp > 0: endy = starty + height
        elif exp == 0: endy = starty
        else : endy = starty - height
        endx = None
    # transform
    # None entries survive the conversion (the still-unknown end coordinate is filled in below)
    if units.lower() == 'relative':
        [startx, endx] = rel2abs_x([startx, endx], axis)
        [starty, endy] = rel2abs_y([starty, endy], axis)
    # determine multiplication constant
    # the power-law through the start point: y = const * x ** exp
    const = starty / ( startx**exp )
    # get end x/y-coordinate
    if endx is not None: endy = const * endx**exp
    else : endx = ( endy / const )**( 1/exp )
    # plot
    # on a log-log scale the power-law is the straight segment between start and end
    return axis.plot([startx, endx], [starty, endy], **kwargs)
# ==================================================================================================
def grid_powerlaw(exp, insert=0, skip=0, end=-1, step=0, axis=None, **kwargs):
    r'''
    Draw a power-law grid: a grid that respects a certain power-law exponent. The grid-lines start from
    the positions of the ticks.

    :arguments:

        **exp** (``float``)
            The power-law exponent.

    :options:

        **insert** (``<int>``)
            Insert extra lines in between the default lines set by the tick positions.

        **skip, end, step** (``<int>``)
            Select from the lines based on ``coor = coor[skip:end:step]``.

        **axis** ([``plt.gca()``] | ...)
            Specify the axis to which to apply the limits.

        ...
            Any ``plt.plot(...)`` option.

    :returns:

        The handle of the ``plt.plot(...)`` command.
    '''
    # default axis
    if axis is None: axis = plt.gca()
    # default plot settings
    kwargs.setdefault('color' , 'k' )
    kwargs.setdefault('linestyle', '--')
    kwargs.setdefault('linewidth', 1 )
    # check
    if axis.get_xscale() != 'log' or axis.get_yscale() != 'log':
        raise IOError('This function only works on a log-log scale, where the power-law is a straight line')
    # all start/end coordinates below are computed in RELATIVE coordinates ([0, 1] along each
    # axis) and converted back to absolute coordinates just before plotting
    # zero-exponent: draw horizontal lines
    if exp == 0:
        # y-coordinate of the start positions
        starty = abs2rel_y(axis.get_yticks(), axis=axis)
        # insert extra coordinates
        # interpolate "insert" evenly spaced extra lines between each pair of ticks
        if insert > 0:
            n = len(starty)
            x = np.linspace(0,1,n+(n-1)*int(insert))
            xp = np.linspace(0,1,n)
            starty = np.interp(x, xp, starty)
        # skip coordinates
        starty = starty[int(skip):int(end):int(1+step)]
        # set remaining coordinates
        # horizontal lines span the full x-range (0 to 1 in relative coordinates)
        endy = starty
        startx = np.zeros((len(starty)))
        endx = np.ones ((len(starty)))
    # all other exponents
    else:
        # get the axis' size in real coordinates
        # - get the limits
        xmin, xmax = axis.get_xlim()
        ymin, ymax = axis.get_ylim()
        # - compute the size in both directions
        deltax = np.log10(xmax) - np.log10(xmin)
        deltay = np.log10(ymax) - np.log10(ymin)
        # convert the exponent in real coordinates to an exponent in relative coordinates
        b = np.abs(exp) * deltax / deltay
        # x-coordinate of the start positions
        startx = abs2rel_x(axis.get_xticks(), axis=axis)
        # compute how many labels need to be prepended
        # lines starting left of the axis (at negative relative x) may still cross the visible
        # window, since each line spans 1/b in relative x (see "endx" below)
        Dx = startx[1] - startx[0]
        nneg = int(np.floor(1./(b*Dx))) - 1
        # add extra to be sure
        if insert > 0:
            nneg += 1
        # prepend
        if nneg > 0:
            startx = np.hstack(( startx[0]+np.cumsum(-Dx * np.ones((nneg)))[::-1], startx ))
        # insert extra coordinates
        if insert > 0:
            n = len(startx)
            x = np.linspace(0,1,n+(n-1)*int(insert))
            xp = np.linspace(0,1,n)
            startx = np.interp(x, xp, startx)
        # skip coordinates
        if step > 0:
            startx = startx[int(skip)::int(1+step)]
        # x-coordinate of the end of the lines
        endx = startx + 1/b
        # y-coordinate of the start and the end of the lines
        # each line spans the full y-range; the direction follows the sign of the exponent
        if exp > 0:
            starty = np.zeros((len(startx)))
            endy = np.ones ((len(startx)))
        else:
            starty = np.ones ((len(startx)))
            endy = np.zeros((len(startx)))
    # convert to real coordinates
    startx = rel2abs_x(startx, axis)
    endx = rel2abs_x(endx , axis)
    starty = rel2abs_y(starty, axis)
    endy = rel2abs_y(endy , axis)
    # plot
    # one plot call: each column of the stacked arrays is one grid line
    lines = axis.plot(np.vstack(( startx, endx )), np.vstack(( starty, endy )), **kwargs)
    # remove excess labels: keep only the first line's label so the legend shows a single entry
    plt.setp(lines[1:], label="_")
    # return handles
    return lines
# ==================================================================================================
def histogram_bin_edges_minwidth(min_width, bins):
    r'''
    Merge bins with right-neighbour until each bin has a minimum width.

    :arguments:

        **bins** (``<array_like>``)
            The bin-edges.

        **min_width** (``<float>``)
            The minimum bin width.
    '''

    # nothing to do
    if min_width is None or min_width is False:
        return bins

    # repeatedly merge the first too-narrow bin into its right neighbour
    # (the last bin is merged into its left neighbour instead)
    while True:

        narrow = np.where(np.diff(bins) < min_width)[0]

        if len(narrow) == 0:
            return bins

        i = narrow[0]

        if i + 1 == len(bins) - 1:
            bins = np.hstack((bins[:i], bins[-1]))
        else:
            bins = np.hstack((bins[:i + 1], bins[i + 2:]))
# ==================================================================================================
def histogram_bin_edges_mincount(data, min_count, bins):
    r'''
    Merge bins with right-neighbour until each bin has a minimum number of data-points.

    :arguments:

        **data** (``<array_like>``)
            Input data. The histogram is computed over the flattened array.

        **bins** (``<array_like>`` | ``<int>``)
            The bin-edges (or the number of bins, automatically converted to equal-sized bins).

        **min_count** (``<int>``)
            The minimum number of data-points per bin.
    '''

    # nothing to do
    if min_count is None or min_count is False:
        return bins

    # check
    if type(min_count) != int: raise IOError('"min_count" must be an integer number')

    # repeatedly merge the first too-sparse bin into its right neighbour
    # (the last bin is merged into its left neighbour instead)
    while True:

        count, _ = np.histogram(data, bins=bins, density=False)

        sparse = np.where(count < min_count)[0]

        if len(sparse) == 0:
            return bins

        i = sparse[0]

        if i + 1 == len(count):
            bins = np.hstack((bins[:i], bins[-1]))
        else:
            bins = np.hstack((bins[:i + 1], bins[i + 2:]))
# ==================================================================================================
def histogram_bin_edges(data, bins=10, mode='equal', min_count=None, integer=False, remove_empty_edges=True, min_width=None):
r'''
Determine bin-edges.
:arguments:
**data** (``<array_like>``)
Input data. The histogram is computed over the flattened array.
:options:
**bins** ([``10``] | ``<int>``)
The number of bins.
**mode** ([``'equal'`` | ``<str>``)
Mode with which to compute the bin-edges:
* ``'equal'``: each bin has equal width.
* ``'log'``: logarithmic spacing.
* ``'uniform'``: uniform number of data-points per bin.
**min_count** (``<int>``)
The minimum number of data-points per bin.
**min_width** (``<float>``)
The minimum width of each bin.
**integer** ([``False``] | [``True``])
If ``True``, bins not encompassing an integer are removed
(e.g. a bin with edges ``[1.1, 1.9]`` is removed, but ``[0.9, 1.1]`` is not removed).
**remove_empty_edges** ([``True``] | [``False``])
Remove empty bins at the beginning or the end.
:returns:
**bin_edges** (``<array of dtype float>``)
The edges to pass into histogram.
'''
# determine the bin-edges
if mode == 'equal':
bin_edges = np.linspace(np.min(data),np.max(data),bins+1)
elif mode == 'log':
bin_edges = np.logspace(np.log10(np.min(data)),np.log10(np.max(data)),bins+1)
elif mode == 'uniform':
# - check
if hasattr(bins, "__len__"):
raise IOError('Only the number of bins can be specified')
# - use the minimum count to estimate the number of bins
if min_count is not None and min_count is not False:
if type(min_count) != int: raise IOError('"min_count" must be an integer number')
bins = int(np.floor(float(len(data))/float(min_count)))
# - number of data-points in each bin (equal for each)
count = int(np.floor(float(len(data))/float(bins))) * np.ones(bins, dtype='int')
# - increase the number of data-points by one is an many bins as needed,
# such that the total fits the total number of data-points
count[np.linspace(0, bins-1, len(data)-np.sum(count)).astype(np.int)] += 1
# - split the data
idx = np.empty((bins+1), dtype='int')
idx[0 ] = 0
idx[1:] = np.cumsum(count)
idx[-1] = len(data) - 1
# - determine the bin-edges
bin_edges = np.unique(np.sort(data)[idx])
else:
raise IOError('Unknown option')
# remove empty starting and ending bin (related to an unfortunate choice of bin-edges)
if remove_empty_edges:
N, _ = np.histogram(data, bins=bin_edges, density=False)
idx = np.min(np.where(N>0)[0])
jdx = np.max(np.where(N>0)[0])
bin_edges = bin_edges[(idx):(jdx+2)]
# merge bins with too few data-points (if needed)
bin_edges = histogram_bin_edges_mincount(data, min_count=min_count, bins=bin_edges)
# merge bins that have too small of a width
bin_edges = histogram_bin_edges_minwidth(min_width=min_width, bins=bin_edges)
# select only bins that encompass an integer (and retain the original bounds)
if integer:
idx = np.where(np.diff(np.floor(bin_edges))>=1)[0]
idx = np.unique(np.hstack((0, idx, len(bin_edges)-1)))
bin_edges = bin_edges[idx]
# return
return bin_edges
# ==================================================================================================
# ==================================================================================================
def histogram_cumulative(data,**kwargs):
    r'''
    Compute cumulative histogram.
    See `numpy.histogram <https://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html>`_

    :extra options:

        **return_edges** ([``True``] | [``False``])
            Return the bin edges if set to ``True``, return their midpoints otherwise.

        **normalize** ([``False``] | ``True``)
            Normalize such that the final probability is one. In this case the function returns the (binned)
            cumulative probability density.
    '''

    # local options; everything else is forwarded to np.histogram
    return_edges = kwargs.pop('return_edges', True)
    normalize = kwargs.pop('normalize', False)

    count, edges = np.histogram(data, **kwargs)

    # cumulative count per bin
    count = np.cumsum(count)

    # scale such that the last value equals one
    if normalize:
        count = count / count[-1]

    # optionally report bin midpoints instead of edges
    if not return_edges:
        edges = edges[:-1] + np.diff(edges) / 2.

    return count, edges
# ==================================================================================================
def hist(P, edges, **kwargs):
    r'''
    Plot histogram.

    :arguments:

        **P** (``<array_like>``)
            Count (or probability) per bin.

        **edges** (``<array_like>``)
            The bin-edges (one entry more than ``P``).

    :options:

        **axis** ([``plt.gca()``] | ...)
            Specify the axis to plot in.

        **cindex** (``<numpy.ndarray>``)
            Array with, for each bar, the value that should be indexed to a color.

        **autoscale** ([``True``] | ``False``)
            Automatically update the limits of the plot.

        ...
            Any ``PatchCollection(...)`` option.

    :returns:

        The handle of the ``PatchCollection``.
    '''
    from matplotlib.collections import PatchCollection
    from matplotlib.patches import Polygon
    # extract local options
    axis = kwargs.pop( 'axis' , plt.gca() )
    cindex = kwargs.pop( 'cindex' , None )
    autoscale = kwargs.pop( 'autoscale' , True )
    # set defaults
    kwargs.setdefault('edgecolor','k')
    # no color-index -> set transparent
    if cindex is None:
        kwargs.setdefault('facecolor',(0.,0.,0.,0.))
    # convert -> list of Polygons
    # each bin becomes a rectangle from the baseline (y=0) up to its count "p"
    poly = []
    for p, xl, xu in zip(P, edges[:-1], edges[1:]):
        coor = np.array([
            [xl, 0.],
            [xu, 0.],
            [xu, p ],
            [xl, p ],
        ])
        poly.append(Polygon(coor))
    # NOTE(review): "(poly)" is just "poly" (not a one-element tuple); PatchCollection
    # receives the list of Polygons directly as its first argument
    args = (poly)
    # convert patches -> matplotlib-objects
    p = PatchCollection(args,**kwargs)
    # add colors to patches
    if cindex is not None:
        p.set_array(cindex)
    # add patches to axis
    axis.add_collection(p)
    # rescale the axes manually
    # (automatic limits for Collections are not applied by matplotlib)
    if autoscale:
        # - get limits
        xlim = [ edges[0], edges[-1] ]
        ylim = [ 0 , np.max(P) ]
        # - set limits +/- 10% extra margin
        axis.set_xlim([xlim[0]-.1*(xlim[1]-xlim[0]),xlim[1]+.1*(xlim[1]-xlim[0])])
        axis.set_ylim([ylim[0]-.1*(ylim[1]-ylim[0]),ylim[1]+.1*(ylim[1]-ylim[0])])
    return p
# ==================================================================================================
def cdf(data,mode='continuous',**kwargs):
    '''
    Return cumulative density.

    :arguments:

        **data** (``<numpy.ndarray>``)
            Input data, to plot the distribution for.

    :returns:

        **P** (``<numpy.ndarray>``)
            Cumulative probability.

        **x** (``<numpy.ndarray>``)
            Data points.
    '''

    # NOTE: "mode" and any extra options are currently unused
    n = len(data)

    # empirical CDF: probabilities equally spaced on [0, 1], data sorted ascending
    P = np.linspace(0.0, 1.0, n)
    x = np.sort(data)

    return (P, x)
# ==================================================================================================
def patch(*args,**kwargs):
    '''
    Add patches to plot. The color of the patches is indexed according to a specified color-index.

    :example:

        Plot a finite element mesh: the outline of the undeformed configuration, and the deformed
        configuration for which the elements get a color e.g. based on stress::

            import matplotlib.pyplot as plt
            import goosempl as gplt

            fig,ax = plt.subplots()

            p = gplt.patch(coor=coor+disp,conn=conn,axis=ax,cindex=stress,cmap='YlOrRd',edgecolor=None)
            _ = gplt.patch(coor=coor,conn=conn,axis=ax)

            cbar = fig.colorbar(p,axis=ax,aspect=10)

            plt.show()

    :arguments - option 1/2:

        **patches** (``<list>``)
            List with patch objects. Can be replaced by specifying ``coor`` and ``conn``.

    :arguments - option 2/2:

        **coor** (``<numpy.ndarray>`` | ``<list>`` (nested))
            Matrix with on each row the coordinates (positions) of each node.

        **conn** (``<numpy.ndarray>`` | ``<list>`` (nested))
            Matrix with on each row the number numbers (rows in ``coor``) which form an element (patch).

    :options:

        **cindex** (``<numpy.ndarray>``)
            Array with, for each patch, the value that should be indexed to a color.

        **axis** (``<matplotlib>``)
            Specify an axis to include to plot in. By default the current axis is used.

        **autoscale** ([``True``] | ``False``)
            Automatically update the limits of the plot (currently automatic limits of Collections are not
            supported by matplotlib).

    :recommended options:

        **cmap** (``<str>`` | ...)
            Specify a colormap.

        **linewidth** (``<float>``)
            Width of the edges.

        **edgecolor** (``<str>`` | ...)
            Color of the edges.

        **clim** (``(<float>,<float>)``)
            Lower and upper limit of the color-axis.

    :returns:

        **handle** (``<matplotlib>``)
            Handle of the patch objects.

    .. seealso::

        * `matplotlib example
          <http://matplotlib.org/examples/api/patch_collection.html>`_.
    '''
    from matplotlib.collections import PatchCollection
    from matplotlib.patches import Polygon
    # check dependent options
    # BUG FIX: the old unconditional check made the documented "patches only" call (option 1/2)
    # impossible; "coor"/"conn" are now only required when no patches are given
    if len(args) == 0 and ( 'coor' not in kwargs or 'conn' not in kwargs ):
        raise IOError('Specify both "coor" and "conn"')
    # extract local options
    axis = kwargs.pop( 'axis' , plt.gca() )
    cindex = kwargs.pop( 'cindex' , None )
    coor = kwargs.pop( 'coor' , None )
    conn = kwargs.pop( 'conn' , None )
    autoscale = kwargs.pop( 'autoscale' , True )
    # set defaults
    kwargs.setdefault('edgecolor','k')
    # no color-index -> set transparent
    if cindex is None:
        kwargs.setdefault('facecolor',(0.,0.,0.,0.))
    # allow a nested list for "coor" (as documented); fancy indexing below needs an ndarray
    if coor is not None:
        coor = np.asarray(coor)
    # convert mesh -> list of Polygons
    if coor is not None and conn is not None:
        poly = [Polygon(coor[iconn,:]) for iconn in conn]
        # BUG FIX: was "tuple(poly, *args)", which raises TypeError as soon as positional
        # patches are passed (tuple() accepts a single argument); combine the generated
        # polygons with any user-supplied patches instead
        args = tuple(poly) + tuple(args)
    # convert patches -> matplotlib-objects
    p = PatchCollection(args,**kwargs)
    # add colors to patches
    if cindex is not None:
        p.set_array(cindex)
    # add patches to axis
    axis.add_collection(p)
    # rescale the axes manually
    # (only possible from the mesh input; with patch-only input leave the limits untouched)
    if autoscale and coor is not None:
        # - get limits
        xlim = [ np.min(coor[:,0]) , np.max(coor[:,0]) ]
        ylim = [ np.min(coor[:,1]) , np.max(coor[:,1]) ]
        # - set limits +/- 10% extra margin
        axis.set_xlim([xlim[0]-.1*(xlim[1]-xlim[0]),xlim[1]+.1*(xlim[1]-xlim[0])])
        axis.set_ylim([ylim[0]-.1*(ylim[1]-ylim[0]),ylim[1]+.1*(ylim[1]-ylim[0])])
    return p
# ==================================================================================================
|
tdegeus/GooseMPL | GooseMPL/__init__.py | histogram_cumulative | python | def histogram_cumulative(data,**kwargs):
r'''
Compute cumulative histogram.
See `numpy.histrogram <https://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html>`_
:extra options:
**return_edges** ([``True``] | [``False``])
Return the bin edges if set to ``True``, return their midpoints otherwise.
**normalize** ([``False``] | ``True``)
Normalize such that the final probability is one. In this case the function returns the (binned)
cumulative probability density.
'''
return_edges = kwargs.pop('return_edges', True)
norm = kwargs.pop('normalize', False)
P, edges = np.histogram(data, **kwargs)
P = np.cumsum(P)
if norm: P = P/P[-1]
if not return_edges: edges = np.diff(edges) / 2. + edges[:-1]
return P, edges | r'''
Compute cumulative histogram.
See `numpy.histrogram <https://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html>`_
:extra options:
**return_edges** ([``True``] | [``False``])
Return the bin edges if set to ``True``, return their midpoints otherwise.
**normalize** ([``False``] | ``True``)
Normalize such that the final probability is one. In this case the function returns the (binned)
cumulative probability density. | train | https://github.com/tdegeus/GooseMPL/blob/16e1e06cbcf7131ac98c03ca7251ce83734ef905/GooseMPL/__init__.py#L1049-L1076 | null | '''
This module provides some extensions to matplotlib.
:dependencies:
* numpy
* matplotlib
:copyright:
| Tom de Geus
| tom@geus.me
| http://www.geus.me
'''
# ==================================================================================================
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
import os,re,sys
# ==================================================================================================
def find_latex_font_serif():
    r'''
    Find an available font to mimic LaTeX, and return its name (or ``None`` if nothing matches).
    '''

    import os, re
    import matplotlib.font_manager

    def stem(path):
        # "/a/b/CMU Serif - Roman.ttf" -> "CMU Serif"
        return os.path.splitext(os.path.split(path)[-1])[0].split(' - ')[0]

    fonts = matplotlib.font_manager.findSystemFonts(fontpaths=None, fontext='ttf')

    # patterns in order of preference: return the first font matching the first pattern
    preferences = [
        r'.*Computer\ Modern\ Roman.*',
        r'.*CMU\ Serif.*',
        r'.*CMU.*',
        r'.*Times.*',
        r'.*DejaVu.*',
        r'.*Serif.*',
    ]

    for pattern in preferences:
        for font in fonts:
            if re.match(pattern, font):
                return stem(font)

    return None
# --------------------------------------------------------------------------------------------------
def copy_style():
    r'''
    Write all goose-styles to the relevant matplotlib configuration directory.
    '''

    import os
    import matplotlib

    # style definitions
    # -----------------

    styles = {}

    styles['goose.mplstyle'] = '''
figure.figsize : 8,6
font.weight : normal
font.size : 16
axes.labelsize : medium
axes.titlesize : medium
xtick.labelsize : small
ytick.labelsize : small
xtick.top : True
ytick.right : True
axes.facecolor : none
axes.prop_cycle : cycler('color',['k', 'r', 'g', 'b', 'y', 'c', 'm'])
legend.fontsize : medium
legend.fancybox : true
legend.columnspacing : 1.0
legend.handletextpad : 0.2
lines.linewidth : 2
image.cmap : afmhot
image.interpolation : nearest
image.origin : lower
savefig.facecolor : none
figure.autolayout : True
errorbar.capsize : 2
'''

    styles['goose-tick-in.mplstyle'] = '''
xtick.direction : in
ytick.direction : in
'''

    styles['goose-tick-lower.mplstyle'] = '''
xtick.top : False
ytick.right : False
axes.spines.top : False
axes.spines.right : False
'''

    # use a serif font mimicking LaTeX when one is available
    if find_latex_font_serif() is not None:
        styles['goose-latex.mplstyle'] = r'''
font.family : serif
font.serif : {serif:s}
font.weight : bold
font.size : 18
text.usetex : true
text.latex.preamble : \usepackage{{amsmath}},\usepackage{{amsfonts}},\usepackage{{amssymb}},\usepackage{{bm}}
'''.format(serif=find_latex_font_serif())
    else:
        # BUG FIX: this fallback string is NOT passed through str.format, so the braces must not
        # be doubled; the old "{{amsmath}}" was written verbatim, producing an invalid preamble
        styles['goose-latex.mplstyle'] = r'''
font.family : serif
font.weight : bold
font.size : 18
text.usetex : true
text.latex.preamble : \usepackage{amsmath},\usepackage{amsfonts},\usepackage{amssymb},\usepackage{bm}
'''

    # write style definitions
    # -----------------------

    # directory name where the styles are stored
    dirname = os.path.abspath(os.path.join(matplotlib.get_configdir(), 'stylelib'))

    # make directory if it does not yet exist
    if not os.path.isdir(dirname): os.makedirs(dirname)

    # write all styles
    for fname, style in styles.items():
        open(os.path.join(dirname, fname),'w').write(style)
# ==================================================================================================
def set_decade_lims(axis=None,direction=None):
    r'''
    Set limits the the floor/ceil values in terms of decades.

    :options:

        **axis** ([``plt.gca()``] | ...)
            Specify the axis to which to apply the limits.

        **direction** ([``None``] | ``'x'`` | ``'y'``)
            Limit the application to a certain direction (default: both).
    '''

    # get current axis
    if axis is None:
        axis = plt.gca()

    # round the lower limit down, and the upper limit up, to whole decades
    def _decades(lo, hi):
        return [10 ** (np.floor(np.log10(lo))), 10 ** (np.ceil(np.log10(hi)))]

    # x-axis
    if direction in (None, 'x'):
        lo, hi = axis.get_xlim()
        axis.set_xlim(_decades(lo, hi))

    # y-axis
    if direction in (None, 'y'):
        lo, hi = axis.get_ylim()
        axis.set_ylim(_decades(lo, hi))
# ==================================================================================================
def scale_lim(lim,factor=1.05):
    r'''
    Scale limits to be 5% wider, to have a nice plot.

    :arguments:

        **lim** (``<list>`` | ``<str>``)
            The limits. May be a string "[...,...]", which is converted to a list.

    :options:

        **factor** ([``1.05``] | ``<float>``)
            Scale factor.
    '''

    # convert string "[...,...]"
    # NOTE(review): uses eval() on the input string -- only pass trusted strings here
    if type(lim) == str: lim = eval(lim)

    # widen both ends symmetrically (mutates and returns the input list)
    margin = (factor - 1.) / 2. * (lim[1] - lim[0])
    lim[0] -= margin
    lim[1] += margin

    return lim
# ==================================================================================================
def abs2rel_x(x, axis=None):
    r'''
    Transform absolute x-coordinates to relative x-coordinates. Relative coordinates correspond to a
    fraction of the relevant axis. Be sure to set the limits and scale before calling this function!

    :arguments:

        **x** (``float``, ``list``)
            Absolute coordinates (``None`` entries in a list are passed through unchanged).

    :options:

        **axis** ([``plt.gca()``] | ...)
            Specify the axis to which to apply the limits.

    :returns:

        **x** (``float``, ``list``)
            Relative coordinates.
    '''
    # get current axis
    if axis is None:
        axis = plt.gca()
    # get current limits
    xmin, xmax = axis.get_xlim()
    # transform
    # the list comprehension fails with TypeError for scalar input, in which case the scalar
    # formula is used; BUG FIX: the former bare "except:" silently swallowed every error
    # - log scale
    if axis.get_xscale() == 'log':
        try:
            return [(np.log10(i)-np.log10(xmin))/(np.log10(xmax)-np.log10(xmin)) if i is not None else i for i in x]
        except TypeError:
            return (np.log10(x)-np.log10(xmin))/(np.log10(xmax)-np.log10(xmin))
    # - normal scale
    else:
        try:
            return [(i-xmin)/(xmax-xmin) if i is not None else i for i in x]
        except TypeError:
            return (x-xmin)/(xmax-xmin)
# ==================================================================================================
def abs2rel_y(y, axis=None):
    r'''
    Transform absolute y-coordinates to relative y-coordinates. Relative coordinates correspond to a
    fraction of the relevant axis. Be sure to set the limits and scale before calling this function!

    :arguments:

        **y** (``float``, ``list``)
            Absolute coordinates (``None`` entries in a list are passed through unchanged).

    :options:

        **axis** ([``plt.gca()``] | ...)
            Specify the axis to which to apply the limits.

    :returns:

        **y** (``float``, ``list``)
            Relative coordinates.
    '''
    # get current axis
    if axis is None:
        axis = plt.gca()
    # get current limits
    ymin, ymax = axis.get_ylim()
    # transform
    # the list comprehension fails with TypeError for scalar input, in which case the scalar
    # formula is used; BUG FIXES: (1) the log-scale decision used "get_xscale()" instead of
    # "get_yscale()"; (2) the former bare "except:" silently swallowed every error
    # - log scale
    if axis.get_yscale() == 'log':
        try:
            return [(np.log10(i)-np.log10(ymin))/(np.log10(ymax)-np.log10(ymin)) if i is not None else i for i in y]
        except TypeError:
            return (np.log10(y)-np.log10(ymin))/(np.log10(ymax)-np.log10(ymin))
    # - normal scale
    else:
        try:
            return [(i-ymin)/(ymax-ymin) if i is not None else i for i in y]
        except TypeError:
            return (y-ymin)/(ymax-ymin)
# ==================================================================================================
def rel2abs_x(x, axis=None):
    r'''
    Transform relative x-coordinates to absolute x-coordinates. Relative coordinates correspond to a
    fraction of the relevant axis. Be sure to set the limits and scale before calling this function!

    :arguments:

        **x** (``float``, ``list``)
            Relative coordinates (``None`` entries in a list are passed through unchanged).

    :options:

        **axis** ([``plt.gca()``] | ...)
            Specify the axis to which to apply the limits.

    :returns:

        **x** (``float``, ``list``)
            Absolute coordinates.
    '''
    # get current axis
    if axis is None:
        axis = plt.gca()
    # get current limits
    xmin, xmax = axis.get_xlim()
    # transform
    # the list comprehension fails with TypeError for scalar input, in which case the scalar
    # formula is used; BUG FIX: the former bare "except:" silently swallowed every error
    # - log scale
    if axis.get_xscale() == 'log':
        try:
            return [10.**(np.log10(xmin)+i*(np.log10(xmax)-np.log10(xmin))) if i is not None else i for i in x]
        except TypeError:
            return 10.**(np.log10(xmin)+x*(np.log10(xmax)-np.log10(xmin)))
    # - normal scale
    else:
        try:
            return [xmin+i*(xmax-xmin) if i is not None else i for i in x]
        except TypeError:
            return xmin+x*(xmax-xmin)
# ==================================================================================================
def rel2abs_y(y, axis=None):
    r'''
    Transform relative y-coordinates to absolute y-coordinates. Relative coordinates correspond to a
    fraction of the relevant axis. Be sure to set the limits and scale before calling this function!

    :arguments:

        **y** (``float``, ``list``)
            Relative coordinates (``None`` entries in a list are passed through unchanged).

    :options:

        **axis** ([``plt.gca()``] | ...)
            Specify the axis to which to apply the limits.

    :returns:

        **y** (``float``, ``list``)
            Absolute coordinates.
    '''
    # get current axis
    if axis is None:
        axis = plt.gca()
    # get current limits
    ymin, ymax = axis.get_ylim()
    # transform
    # the list comprehension fails with TypeError for scalar input, in which case the scalar
    # formula is used; BUG FIXES: (1) the log-scale decision used "get_xscale()" instead of
    # "get_yscale()"; (2) the former bare "except:" silently swallowed every error
    # - log scale
    if axis.get_yscale() == 'log':
        try:
            return [10.**(np.log10(ymin)+i*(np.log10(ymax)-np.log10(ymin))) if i is not None else i for i in y]
        except TypeError:
            return 10.**(np.log10(ymin)+y*(np.log10(ymax)-np.log10(ymin)))
    # - normal scale
    else:
        try:
            return [ymin+i*(ymax-ymin) if i is not None else i for i in y]
        except TypeError:
            return ymin+y*(ymax-ymin)
# ==================================================================================================
def subplots(scale_x=None, scale_y=None, scale=None, **kwargs):
    r'''
    Run ``matplotlib.pyplot.subplots`` with ``figsize`` set to the correct multiple of the default.

    :additional options:

        **scale, scale_x, scale_y** (``<float>``)
            Scale the figure-size (along one of the dimensions).
    '''

    # an explicit figure-size takes precedence over all scaling options
    if 'figsize' in kwargs:
        return plt.subplots(**kwargs)

    # start from the default figure-size and apply the scale factors
    width, height = mpl.rcParams['figure.figsize']

    if scale is not None:
        width, height = width * scale, height * scale
    if scale_x is not None:
        width *= scale_x
    if scale_y is not None:
        height *= scale_y

    # one default-sized panel per row/column
    nrows = kwargs.pop('nrows', 1)
    ncols = kwargs.pop('ncols', 1)

    return plt.subplots(nrows=nrows, ncols=ncols, figsize=(ncols * width, nrows * height), **kwargs)
# ==================================================================================================
def plot(x, y, units='absolute', axis=None, **kwargs):
    r'''
    Plot a line.

    :arguments:

        **x, y** (``list``)
            Coordinates.

    :options:

        **units** ([``'absolute'``] | ``'relative'``)
            Unit system of the coordinates. Relative coordinates are fractions of the relevant axis;
            when using them, set the limits and scale *before* calling this function.

        ...
            Any ``plt.plot(...)`` option.

    :returns:

        The handle of the ``plt.plot(...)`` command.
    '''

    # fall back on the current axis
    ax = plt.gca() if axis is None else axis

    # relative -> absolute coordinates (uses the current limits/scale of "ax")
    if units.lower() == 'relative':
        x = rel2abs_x(x, ax)
        y = rel2abs_y(y, ax)

    return ax.plot(x, y, **kwargs)
# ==================================================================================================
def text(x, y, text, units='absolute', axis=None, **kwargs):
    r'''
    Plot a text.

    :arguments:

        **x, y** (``float``)
            Coordinates.

        **text** (``str``)
            Text to plot.

    :options:

        **units** ([``'absolute'``] | ``'relative'``)
            Unit system of the coordinates. Relative coordinates are fractions of the relevant axis;
            when using them, set the limits and scale *before* calling this function.

        ...
            Any ``plt.text(...)`` option.

    :returns:

        The handle of the ``plt.text(...)`` command.
    '''

    # fall back on the current axis
    ax = plt.gca() if axis is None else axis

    # relative -> absolute coordinates (uses the current limits/scale of "ax")
    if units.lower() == 'relative':
        x = rel2abs_x(x, ax)
        y = rel2abs_y(y, ax)

    return ax.text(x, y, text, **kwargs)
# ==================================================================================================
def diagonal_powerlaw(exp, ll=None, lr=None, tl=None, tr=None, width=None, height=None, plot=False, **kwargs):
    r'''
    Set the limits such that a power-law with a certain exponent lies on the diagonal.

    :arguments:

        **exp** (``<float>``)
            The power-law exponent.

        **ll, lr, tl, tr** (``<list>``)
            Coordinates of exactly one corner: lower-left, lower-right, top-left, or top-right.

        **width, height** (``<float>``)
            Width or height (specify exactly one; the other follows from ``exp``).

    :options:

        **axis** ([``plt.gca()``] | ...)
            Specify the axis to which to apply the limits.

        **plot** ([``False``] | ``True``)
            Plot the diagonal.

        ...
            Any ``plt.plot(...)`` option.

    :returns:

        The handle of the ``plt.plot(...)`` command (if any).
    '''
    # BUG FIX: evaluate plt.gca() lazily; the old eager pop-default spawned a figure
    # even when an explicit 'axis' was supplied
    axis = kwargs.pop('axis', None)
    if axis is None:
        axis = plt.gca()
    # exactly one of width/height must be given; work in (natural) log-space, where a
    # power-law on a log-log plot is a straight line
    # NOTE(review): truthiness is used, so 0 counts as "not given" — confirm intended
    if width and not height: width = np.log(width)
    elif height and not width: height = np.log(height)
    else: raise IOError('Specify "width" or "height"')
    # exactly one corner may be given; convert it to log-space as well
    if ll and not lr and not tl and not tr: ll = [np.log(ll[0]), np.log(ll[1])]
    elif lr and not ll and not tl and not tr: lr = [np.log(lr[0]), np.log(lr[1])]
    elif tl and not lr and not ll and not tr: tl = [np.log(tl[0]), np.log(tl[1])]
    elif tr and not lr and not tl and not ll: tr = [np.log(tr[0]), np.log(tr[1])]
    else: raise IOError('Specify "ll" or "lr" or "tl" or "tr"')
    axis.set_xscale('log')
    axis.set_yscale('log')
    # the diagonal condition fixes the aspect: height/width == |exp| (in log-space)
    if width: height = width * np.abs(exp)
    elif height: width = height / np.abs(exp)
    # set limits from the given corner; sorted() keeps (min, max) order when the offset
    # is subtracted
    if ll:
        axis.set_xlim(sorted([np.exp(ll[0]), np.exp(ll[0]+width)]))
        axis.set_ylim(sorted([np.exp(ll[1]), np.exp(ll[1]+height)]))
    elif lr:
        axis.set_xlim(sorted([np.exp(lr[0]), np.exp(lr[0]-width)]))
        axis.set_ylim(sorted([np.exp(lr[1]), np.exp(lr[1]+height)]))
    elif tl:
        axis.set_xlim(sorted([np.exp(tl[0]), np.exp(tl[0]+width)]))
        axis.set_ylim(sorted([np.exp(tl[1]), np.exp(tl[1]-height)]))
    elif tr:
        axis.set_xlim(sorted([np.exp(tr[0]), np.exp(tr[0]-width)]))
        axis.set_ylim(sorted([np.exp(tr[1]), np.exp(tr[1]-height)]))
    # optionally draw the diagonal itself (corner to corner, in relative coordinates)
    if plot:
        if exp > 0: return plot_powerlaw(exp, 0., 0., 1., **kwargs)
        else: return plot_powerlaw(exp, 0., 1., 1., **kwargs)
# ==================================================================================================
def annotate_powerlaw(text, exp, startx, starty, width=None, rx=0.5, ry=0.5, **kwargs):
    r'''
    Add a label to the middle of a power-law annotation (see ``goosempl.plot_powerlaw``).

    :arguments:

        **text** (``str``)
            Text to plot.

        **exp** (``float``)
            The power-law exponent.

        **startx, starty** (``float``)
            Start coordinates.

    :options:

        **width, height, endx, endy** (``float``)
            Definition of the end coordinate (only one of these options is needed).

        **rx, ry** (``float``)
            Shift in x- and y-direction w.r.t. the default (mid-point) coordinates.

        **units** ([``'relative'``] | ``'absolute'``)
            The type of units in which the coordinates are specified. Relative coordinates
            correspond to a fraction of the relevant axis. If you use relative coordinates,
            be sure to set the limits and scale before calling this function!

        **axis** ([``plt.gca()``] | ...)
            Specify the axis to which to apply the limits.

        ...
            Any ``plt.text(...)`` option.

    :returns:

        The handle of the ``plt.text(...)`` command.
    '''
    # get options/defaults
    endx = kwargs.pop('endx', None)
    endy = kwargs.pop('endy', None)
    height = kwargs.pop('height', None)
    units = kwargs.pop('units', 'relative')
    # BUG FIX: evaluate plt.gca() lazily; the old eager pop-default spawned a figure
    # even when an explicit 'axis' was supplied
    axis = kwargs.pop('axis', None)
    if axis is None:
        axis = plt.gca()
    # check: a power-law is a straight line only on a log-log scale
    if axis.get_xscale() != 'log' or axis.get_yscale() != 'log':
        raise IOError('This function only works on a log-log scale, where the power-law is a straight line')
    # convert width/height to an end-coordinate
    if width is not None:
        endx = startx + width
        endy = None
    elif height is not None:
        if exp > 0: endy = starty + height
        elif exp == 0: endy = starty
        else: endy = starty - height
        endx = None
    # transform relative -> absolute coordinates
    if units.lower() == 'relative':
        [startx, endx] = rel2abs_x([startx, endx], axis)
        [starty, endy] = rel2abs_y([starty, endy], axis)
    # determine the multiplication constant: y = const * x ** exp
    const = starty / (startx ** exp)
    # get the missing end coordinate
    if endx is not None: endy = const * endx ** exp
    else: endx = (endy / const) ** (1 / exp)
    # position the label: interpolate in log10-space, at fraction (rx, ry) along the line
    x = 10. ** (np.log10(startx) + rx * (np.log10(endx) - np.log10(startx)))
    y = 10. ** (np.log10(starty) + ry * (np.log10(endy) - np.log10(starty)))
    # plot
    return axis.text(x, y, text, **kwargs)
# ==================================================================================================
def plot_powerlaw(exp, startx, starty, width=None, **kwargs):
    r'''
    Plot a power-law (a straight line on a log-log plot).

    :arguments:

        **exp** (``float``)
            The power-law exponent.

        **startx, starty** (``float``)
            Start coordinates.

    :options:

        **width, height, endx, endy** (``float``)
            Definition of the end coordinate (only one of these options is needed).

        **units** ([``'relative'``] | ``'absolute'``)
            The type of units in which the coordinates are specified. Relative coordinates
            correspond to a fraction of the relevant axis. If you use relative coordinates,
            be sure to set the limits and scale before calling this function!

        **axis** ([``plt.gca()``] | ...)
            Specify the axis to which to apply the limits.

        ...
            Any ``plt.plot(...)`` option.

    :returns:

        The handle of the ``plt.plot(...)`` command.
    '''
    # get options/defaults
    endx = kwargs.pop('endx', None)
    endy = kwargs.pop('endy', None)
    height = kwargs.pop('height', None)
    units = kwargs.pop('units', 'relative')
    # BUG FIX: evaluate plt.gca() lazily; the old eager pop-default spawned a figure
    # even when an explicit 'axis' was supplied
    axis = kwargs.pop('axis', None)
    if axis is None:
        axis = plt.gca()
    # check: a power-law is a straight line only on a log-log scale
    if axis.get_xscale() != 'log' or axis.get_yscale() != 'log':
        raise IOError('This function only works on a log-log scale, where the power-law is a straight line')
    # convert width/height to an end-coordinate
    if width is not None:
        endx = startx + width
        endy = None
    elif height is not None:
        if exp > 0: endy = starty + height
        elif exp == 0: endy = starty
        else: endy = starty - height
        endx = None
    # transform relative -> absolute coordinates
    if units.lower() == 'relative':
        [startx, endx] = rel2abs_x([startx, endx], axis)
        [starty, endy] = rel2abs_y([starty, endy], axis)
    # determine the multiplication constant: y = const * x ** exp
    const = starty / (startx ** exp)
    # get the missing end coordinate
    if endx is not None: endy = const * endx ** exp
    else: endx = (endy / const) ** (1 / exp)
    # plot
    return axis.plot([startx, endx], [starty, endy], **kwargs)
# ==================================================================================================
def grid_powerlaw(exp, insert=0, skip=0, end=-1, step=0, axis=None, **kwargs):
    r'''
    Draw a power-law grid: a grid that respects a certain power-law exponent. The grid-lines start
    from the positions of the ticks.

    :arguments:

        **exp** (``float``)
            The power-law exponent.

    :options:

        **insert** (``<int>``)
            Insert extra lines in between the default lines set by the tick positions.

        **skip, end, step** (``<int>``)
            Select from the lines based on ``coor = coor[skip:end:step]``.

        **axis** ([``plt.gca()``] | ...)
            Specify the axis to which to apply the limits.

        ...
            Any ``plt.plot(...)`` option.

    :returns:

        The handle of the ``plt.plot(...)`` command.
    '''
    # default axis
    if axis is None: axis = plt.gca()
    # default plot settings: thin dashed black lines
    kwargs.setdefault('color' , 'k' )
    kwargs.setdefault('linestyle', '--')
    kwargs.setdefault('linewidth', 1 )
    # check: only on a log-log scale is a power-law a straight line
    if axis.get_xscale() != 'log' or axis.get_yscale() != 'log':
        raise IOError('This function only works on a log-log scale, where the power-law is a straight line')
    # zero-exponent: draw horizontal lines
    if exp == 0:
        # y-coordinate of the start positions, in relative (axis-fraction) coordinates
        starty = abs2rel_y(axis.get_yticks(), axis=axis)
        # insert extra coordinates by interpolating between the tick positions
        if insert > 0:
            n = len(starty)
            x = np.linspace(0,1,n+(n-1)*int(insert))
            xp = np.linspace(0,1,n)
            starty = np.interp(x, xp, starty)
        # skip coordinates
        # NOTE(review): this branch slices with "skip:end:1+step", while the non-zero
        # branch below uses "skip::1+step" (ignoring "end") and only when step > 0 —
        # confirm the asymmetry is intended
        starty = starty[int(skip):int(end):int(1+step)]
        # horizontal lines span the whole x-range in relative coordinates (0 -> 1)
        endy = starty
        startx = np.zeros((len(starty)))
        endx = np.ones ((len(starty)))
    # all other exponents
    else:
        # get the axis' size in real coordinates
        # - get the limits
        xmin, xmax = axis.get_xlim()
        ymin, ymax = axis.get_ylim()
        # - compute the size in both directions (in decades)
        deltax = np.log10(xmax) - np.log10(xmin)
        deltay = np.log10(ymax) - np.log10(ymin)
        # convert the exponent in real coordinates to an exponent in relative coordinates
        b = np.abs(exp) * deltax / deltay
        # x-coordinate of the start positions, in relative (axis-fraction) coordinates
        startx = abs2rel_x(axis.get_xticks(), axis=axis)
        # compute how many labels need to be prepended so that lines starting left of the
        # axis still cross the visible window
        Dx = startx[1] - startx[0]
        nneg = int(np.floor(1./(b*Dx))) - 1
        # add extra to be sure
        if insert > 0:
            nneg += 1
        # prepend start positions to the left of the window
        if nneg > 0:
            startx = np.hstack(( startx[0]+np.cumsum(-Dx * np.ones((nneg)))[::-1], startx ))
        # insert extra coordinates by interpolating between the tick positions
        if insert > 0:
            n = len(startx)
            x = np.linspace(0,1,n+(n-1)*int(insert))
            xp = np.linspace(0,1,n)
            startx = np.interp(x, xp, startx)
        # skip coordinates
        if step > 0:
            startx = startx[int(skip)::int(1+step)]
        # x-coordinate of the end of the lines: each line spans the full y-range, so its
        # horizontal run is 1/b in relative coordinates
        endx = startx + 1/b
        # y-coordinate of the start and the end of the lines: bottom-to-top for positive
        # exponents, top-to-bottom for negative ones
        if exp > 0:
            starty = np.zeros((len(startx)))
            endy = np.ones ((len(startx)))
        else:
            starty = np.ones ((len(startx)))
            endy = np.zeros((len(startx)))
    # convert to real coordinates
    startx = rel2abs_x(startx, axis)
    endx = rel2abs_x(endx , axis)
    starty = rel2abs_y(starty, axis)
    endy = rel2abs_y(endy , axis)
    # plot all lines with one call (one column per line)
    lines = axis.plot(np.vstack(( startx, endx )), np.vstack(( starty, endy )), **kwargs)
    # suppress excess legend labels: keep only the first line's label
    plt.setp(lines[1:], label="_")
    # return handles
    return lines
# ==================================================================================================
def histogram_bin_edges_minwidth(min_width, bins):
    r'''
    Merge bins with their right-neighbour until every bin is at least ``min_width`` wide.

    :arguments:

        **min_width** (``<float>``)
            The minimum bin width.

        **bins** (``<array_like>``)
            The bin-edges.
    '''
    # nothing requested: keep the edges as they are
    if min_width is None or min_width is False:
        return bins

    # repeatedly remove the edge that closes the first too-narrow bin
    while True:
        narrow = np.where(np.diff(bins) < min_width)[0]
        if len(narrow) == 0:
            return bins
        i = narrow[0]
        if i + 1 == len(bins) - 1:
            # the last bin is too narrow: merge it into its left neighbour
            bins = np.hstack((bins[:i], bins[-1]))
        else:
            # otherwise: merge with the right neighbour
            bins = np.hstack((bins[:(i+1)], bins[(i+2):]))
# ==================================================================================================
def histogram_bin_edges_mincount(data, min_count, bins):
    r'''
    Merge bins with their right-neighbour until every bin holds at least ``min_count``
    data-points.

    :arguments:

        **data** (``<array_like>``)
            Input data. The histogram is computed over the flattened array.

        **min_count** (``<int>``)
            The minimum number of data-points per bin.

        **bins** (``<array_like>`` | ``<int>``)
            The bin-edges (or the number of bins, automatically converted to equal-sized bins).
    '''
    # nothing requested: keep the edges as they are
    if min_count is None or min_count is False:
        return bins

    # check
    if type(min_count) != int: raise IOError('"min_count" must be an integer number')

    # repeatedly remove the edge that closes the first too-sparse bin
    while True:
        counts, _ = np.histogram(data, bins=bins, density=False)
        sparse = np.where(counts < min_count)[0]
        if len(sparse) == 0:
            return bins
        i = sparse[0]
        if i + 1 == len(counts):
            # the last bin is too sparse: merge it into its left neighbour
            bins = np.hstack((bins[:i], bins[-1]))
        else:
            # otherwise: merge with the right neighbour
            bins = np.hstack((bins[:(i+1)], bins[(i+2):]))
# ==================================================================================================
def histogram_bin_edges(data, bins=10, mode='equal', min_count=None, integer=False, remove_empty_edges=True, min_width=None):
    r'''
    Determine bin-edges.

    :arguments:

        **data** (``<array_like>``)
            Input data. The histogram is computed over the flattened array.

    :options:

        **bins** ([``10``] | ``<int>``)
            The number of bins.

        **mode** ([``'equal'``] | ``<str>``)
            Mode with which to compute the bin-edges:
            * ``'equal'``: each bin has equal width.
            * ``'log'``: logarithmic spacing.
            * ``'uniform'``: uniform number of data-points per bin.

        **min_count** (``<int>``)
            The minimum number of data-points per bin.

        **min_width** (``<float>``)
            The minimum width of each bin.

        **integer** ([``False``] | ``True``)
            If ``True``, bins not encompassing an integer are removed
            (e.g. a bin with edges ``[1.1, 1.9]`` is removed, but ``[0.9, 1.1]`` is not removed).

        **remove_empty_edges** ([``True``] | ``False``)
            Remove empty bins at the beginning or the end.

    :returns:

        **bin_edges** (``<array of dtype float>``)
            The edges to pass into histogram.
    '''
    # determine the bin-edges
    if mode == 'equal':
        bin_edges = np.linspace(np.min(data), np.max(data), bins+1)
    elif mode == 'log':
        bin_edges = np.logspace(np.log10(np.min(data)), np.log10(np.max(data)), bins+1)
    elif mode == 'uniform':
        # - check
        if hasattr(bins, "__len__"):
            raise IOError('Only the number of bins can be specified')
        # - use the minimum count to estimate the number of bins
        if min_count is not None and min_count is not False:
            if type(min_count) != int: raise IOError('"min_count" must be an integer number')
            bins = int(np.floor(float(len(data))/float(min_count)))
        # - number of data-points in each bin (equal for each)
        count = int(np.floor(float(len(data))/float(bins))) * np.ones(bins, dtype='int')
        # - increase the number of data-points by one in as many bins as needed,
        #   such that the total fits the total number of data-points
        # BUG FIX: "np.int" was deprecated in NumPy 1.20 and removed in 1.24; the builtin
        # "int" is the documented replacement
        count[np.linspace(0, bins-1, len(data)-np.sum(count)).astype(int)] += 1
        # - split the data
        idx = np.empty((bins+1), dtype='int')
        idx[0 ] = 0
        idx[1:] = np.cumsum(count)
        idx[-1] = len(data) - 1
        # - determine the bin-edges (np.unique drops duplicate edges)
        bin_edges = np.unique(np.sort(data)[idx])
    else:
        raise IOError('Unknown option')
    # remove empty starting and ending bin (related to an unfortunate choice of bin-edges)
    if remove_empty_edges:
        N, _ = np.histogram(data, bins=bin_edges, density=False)
        idx = np.min(np.where(N > 0)[0])
        jdx = np.max(np.where(N > 0)[0])
        bin_edges = bin_edges[(idx):(jdx+2)]
    # merge bins with too few data-points (if needed)
    bin_edges = histogram_bin_edges_mincount(data, min_count=min_count, bins=bin_edges)
    # merge bins that have too small of a width
    bin_edges = histogram_bin_edges_minwidth(min_width=min_width, bins=bin_edges)
    # select only bins that encompass an integer (and retain the original bounds)
    if integer:
        idx = np.where(np.diff(np.floor(bin_edges)) >= 1)[0]
        idx = np.unique(np.hstack((0, idx, len(bin_edges)-1)))
        bin_edges = bin_edges[idx]
    # return
    return bin_edges
# ==================================================================================================
def histogram(data, return_edges=True, **kwargs):
    r'''
    Compute a histogram: a thin wrapper around ``numpy.histogram``.

    See `numpy.histogram <https://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html>`_

    :extra options:

        **return_edges** ([``True``] | [``False``])
            Return the bin edges if set to ``True``, return their midpoints otherwise.
    '''
    # delegate the actual histogram computation to NumPy
    counts, bin_edges = np.histogram(data, **kwargs)

    if return_edges:
        return counts, bin_edges

    # convert the edges to the midpoint of each bin
    midpoints = bin_edges[:-1] + np.diff(bin_edges) / 2.
    return counts, midpoints
# ==================================================================================================
# ==================================================================================================
def hist(P, edges, **kwargs):
    r'''
    Plot a histogram: one rectangular patch per bin.

    :arguments:

        **P** (``<array_like>``)
            Count (or density) per bin.

        **edges** (``<array_like>``)
            The bin-edges (one entry more than ``P``).

    :options:

        **cindex** (``<numpy.ndarray>``)
            Array with, for each patch, the value that should be indexed to a color.

        **axis** ([``plt.gca()``] | ...)
            Specify an axis to include to plot in.

        **autoscale** ([``True``] | ``False``)
            Automatically update the limits of the plot.

        ...
            Any ``matplotlib.collections.PatchCollection`` option.

    :returns:

        Handle of the patch objects.
    '''
    from matplotlib.collections import PatchCollection
    from matplotlib.patches import Polygon
    # extract local options
    # BUG FIX: evaluate plt.gca() lazily; the old eager pop-default spawned a figure
    # even when an explicit 'axis' was supplied
    axis = kwargs.pop('axis', None)
    cindex = kwargs.pop('cindex', None)
    autoscale = kwargs.pop('autoscale', True)
    if axis is None:
        axis = plt.gca()
    # set defaults
    kwargs.setdefault('edgecolor', 'k')
    # no color-index -> set transparent
    if cindex is None:
        kwargs.setdefault('facecolor', (0., 0., 0., 0.))
    # one rectangle (counter-clockwise from the lower-left corner) per bin
    poly = []
    for p, xl, xu in zip(P, edges[:-1], edges[1:]):
        coor = np.array([
            [xl, 0.],
            [xu, 0.],
            [xu, p ],
            [xl, p ],
        ])
        poly.append(Polygon(coor))
    # convert patches -> matplotlib-objects
    # (was "args = (poly)", which is not a tuple but just "poly" with redundant parentheses)
    p = PatchCollection(poly, **kwargs)
    # add colors to patches
    if cindex is not None:
        p.set_array(cindex)
    # add patches to axis
    axis.add_collection(p)
    # rescale the axes manually (automatic limits of Collections are not supported)
    if autoscale:
        # - get limits
        xlim = [edges[0], edges[-1]]
        ylim = [0, np.max(P)]
        # - set limits +/- 10% extra margin
        axis.set_xlim([xlim[0]-.1*(xlim[1]-xlim[0]), xlim[1]+.1*(xlim[1]-xlim[0])])
        axis.set_ylim([ylim[0]-.1*(ylim[1]-ylim[0]), ylim[1]+.1*(ylim[1]-ylim[0])])
    return p
# ==================================================================================================
def cdf(data, mode='continuous', **kwargs):
    '''
    Return the empirical cumulative density of ``data``.

    :arguments:

        **data** (``<numpy.ndarray>``)
            Input data, to plot the distribution for.

    :returns:

        **P** (``<numpy.ndarray>``)
            Cumulative probability (equally spaced on [0, 1]).

        **x** (``<numpy.ndarray>``)
            Data points, sorted ascending.
    '''
    # NOTE(review): 'mode' and '**kwargs' are accepted but currently unused
    P = np.linspace(0.0, 1.0, len(data))
    x = np.sort(data)
    return (P, x)
# ==================================================================================================
def patch(*args, **kwargs):
    '''
    Add patches to plot. The color of the patches is indexed according to a specified color-index.

    :example:

        Plot a finite element mesh: the outline of the undeformed configuration, and the deformed
        configuration for which the elements get a color e.g. based on stress::

            import matplotlib.pyplot as plt
            import goosempl as gplt

            fig,ax = plt.subplots()

            p = gplt.patch(coor=coor+disp,conn=conn,axis=ax,cindex=stress,cmap='YlOrRd',edgecolor=None)
            _ = gplt.patch(coor=coor     ,conn=conn,axis=ax)

            cbar = fig.colorbar(p,axis=ax,aspect=10)

            plt.show()

    :arguments - option 1/2:

        **patches** (``<list>``)
            List with patch objects. Can be replaced by specifying ``coor`` and ``conn``.

    :arguments - option 2/2:

        **coor** (``<numpy.ndarray>`` | ``<list>`` (nested))
            Matrix with on each row the coordinates (positions) of each node.

        **conn** (``<numpy.ndarray>`` | ``<list>`` (nested))
            Matrix with on each row the number numbers (rows in ``coor``) which form an element
            (patch).

    :options:

        **cindex** (``<numpy.ndarray>``)
            Array with, for each patch, the value that should be indexed to a color.

        **axis** (``<matplotlib>``)
            Specify an axis to include to plot in. By default the current axis is used.

        **autoscale** ([``True``] | ``False``)
            Automatically update the limits of the plot (currently automatic limits of Collections
            are not supported by matplotlib). Only possible when ``coor`` is specified.

    :recommended options:

        **cmap** (``<str>`` | ...)
            Specify a colormap.

        **linewidth** (``<float>``)
            Width of the edges.

        **edgecolor** (``<str>`` | ...)
            Color of the edges.

        **clim** (``(<float>,<float>)``)
            Lower and upper limit of the color-axis.

    :returns:

        **handle** (``<matplotlib>``)
            Handle of the patch objects.

    .. seealso::

        * `matplotlib example
          <http://matplotlib.org/examples/api/patch_collection.html>`_.
    '''
    from matplotlib.collections import PatchCollection
    from matplotlib.patches import Polygon
    # extract local options
    # BUG FIX: evaluate plt.gca() lazily; the old eager pop-default spawned a figure
    # even when an explicit 'axis' was supplied
    axis = kwargs.pop('axis', None)
    cindex = kwargs.pop('cindex', None)
    coor = kwargs.pop('coor', None)
    conn = kwargs.pop('conn', None)
    autoscale = kwargs.pop('autoscale', True)
    if axis is None:
        axis = plt.gca()
    # check input: either patches (positional, option 1/2) or a mesh (option 2/2)
    # BUG FIX: the old check unconditionally required "coor"/"conn", which made the
    # documented "patches" calling variant unusable
    if len(args) == 0 and (coor is None or conn is None):
        raise IOError('Specify both "coor" and "conn"')
    # set defaults
    kwargs.setdefault('edgecolor', 'k')
    # no color-index -> set transparent
    if cindex is None:
        kwargs.setdefault('facecolor', (0., 0., 0., 0.))
    # convert mesh -> list of Polygons
    # BUG FIX: was "args = tuple(poly, *args)", a TypeError for any positional argument
    # (tuple() takes at most one argument)
    if coor is not None and conn is not None:
        patches = [Polygon(coor[iconn, :]) for iconn in conn]
    else:
        patches = args[0]
    # convert patches -> matplotlib-objects
    p = PatchCollection(patches, **kwargs)
    # add colors to patches
    if cindex is not None:
        p.set_array(cindex)
    # add patches to axis
    axis.add_collection(p)
    # rescale the axes manually (only possible when the mesh coordinates are known)
    if autoscale and coor is not None:
        # - get limits
        xlim = [np.min(coor[:, 0]), np.max(coor[:, 0])]
        ylim = [np.min(coor[:, 1]), np.max(coor[:, 1])]
        # - set limits +/- 10% extra margin
        axis.set_xlim([xlim[0]-.1*(xlim[1]-xlim[0]), xlim[1]+.1*(xlim[1]-xlim[0])])
        axis.set_ylim([ylim[0]-.1*(ylim[1]-ylim[0]), ylim[1]+.1*(ylim[1]-ylim[0])])
    return p
# ==================================================================================================
|
tdegeus/GooseMPL | GooseMPL/__init__.py | hist | python | def hist(P, edges, **kwargs):
r'''
Plot histogram.
'''
from matplotlib.collections import PatchCollection
from matplotlib.patches import Polygon
# extract local options
axis = kwargs.pop( 'axis' , plt.gca() )
cindex = kwargs.pop( 'cindex' , None )
autoscale = kwargs.pop( 'autoscale' , True )
# set defaults
kwargs.setdefault('edgecolor','k')
# no color-index -> set transparent
if cindex is None:
kwargs.setdefault('facecolor',(0.,0.,0.,0.))
# convert -> list of Polygons
poly = []
for p, xl, xu in zip(P, edges[:-1], edges[1:]):
coor = np.array([
[xl, 0.],
[xu, 0.],
[xu, p ],
[xl, p ],
])
poly.append(Polygon(coor))
args = (poly)
# convert patches -> matplotlib-objects
p = PatchCollection(args,**kwargs)
# add colors to patches
if cindex is not None:
p.set_array(cindex)
# add patches to axis
axis.add_collection(p)
# rescale the axes manually
if autoscale:
# - get limits
xlim = [ edges[0], edges[-1] ]
ylim = [ 0 , np.max(P) ]
# - set limits +/- 10% extra margin
axis.set_xlim([xlim[0]-.1*(xlim[1]-xlim[0]),xlim[1]+.1*(xlim[1]-xlim[0])])
axis.set_ylim([ylim[0]-.1*(ylim[1]-ylim[0]),ylim[1]+.1*(ylim[1]-ylim[0])])
return p | r'''
Plot histogram. | train | https://github.com/tdegeus/GooseMPL/blob/16e1e06cbcf7131ac98c03ca7251ce83734ef905/GooseMPL/__init__.py#L1080-L1129 | null | '''
This module provides some extensions to matplotlib.
:dependencies:
* numpy
* matplotlib
:copyright:
| Tom de Geus
| tom@geus.me
| http://www.geus.me
'''
# ==================================================================================================
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
import os,re,sys
# ==================================================================================================
def find_latex_font_serif():
    r'''
    Find an available font to mimic LaTeX, and return its name (or ``None`` if none is found).
    '''
    import os, re
    import matplotlib.font_manager

    def fontname(path):
        # "Family - Style.ttf" -> "Family"
        return os.path.splitext(os.path.split(path)[-1])[0].split(' - ')[0]

    # candidate patterns, in order of preference
    preferences = [
        r'.*Computer\ Modern\ Roman.*',
        r'.*CMU\ Serif.*',
        r'.*CMU.*',
        r'.*Times.*',
        r'.*DejaVu.*',
        r'.*Serif.*',
    ]

    available = matplotlib.font_manager.findSystemFonts(fontpaths=None, fontext='ttf')

    # return the first installed font matching the most-preferred pattern
    for pattern in preferences:
        for path in available:
            if re.match(pattern, path):
                return fontname(path)

    return None
# --------------------------------------------------------------------------------------------------
def copy_style():
    r'''
    Write all goose-styles to the relevant matplotlib configuration directory
    (``<configdir>/stylelib``), so that they can be used with ``plt.style.use(...)``.
    '''
    import os
    import matplotlib

    # style definitions
    # -----------------

    styles = {}

    styles['goose.mplstyle'] = '''
figure.figsize : 8,6
font.weight : normal
font.size : 16
axes.labelsize : medium
axes.titlesize : medium
xtick.labelsize : small
ytick.labelsize : small
xtick.top : True
ytick.right : True
axes.facecolor : none
axes.prop_cycle : cycler('color',['k', 'r', 'g', 'b', 'y', 'c', 'm'])
legend.fontsize : medium
legend.fancybox : true
legend.columnspacing : 1.0
legend.handletextpad : 0.2
lines.linewidth : 2
image.cmap : afmhot
image.interpolation : nearest
image.origin : lower
savefig.facecolor : none
figure.autolayout : True
errorbar.capsize : 2
'''

    styles['goose-tick-in.mplstyle'] = '''
xtick.direction : in
ytick.direction : in
'''

    styles['goose-tick-lower.mplstyle'] = '''
xtick.top : False
ytick.right : False
axes.spines.top : False
axes.spines.right : False
'''

    # use a serif font that mimics LaTeX's Computer Modern when one is installed
    if find_latex_font_serif() is not None:
        styles['goose-latex.mplstyle'] = r'''
font.family : serif
font.serif : {serif:s}
font.weight : bold
font.size : 18
text.usetex : true
text.latex.preamble : \usepackage{{amsmath}},\usepackage{{amsfonts}},\usepackage{{amssymb}},\usepackage{{bm}}
'''.format(serif=find_latex_font_serif())
    else:
        # BUG FIX: this fallback was copied from the ".format(...)" variant above with
        # doubled braces, but is never passed through ".format", so literal "{{...}}"
        # ended up in the style file and broke the LaTeX preamble; use single braces
        styles['goose-latex.mplstyle'] = r'''
font.family : serif
font.weight : bold
font.size : 18
text.usetex : true
text.latex.preamble : \usepackage{amsmath},\usepackage{amsfonts},\usepackage{amssymb},\usepackage{bm}
'''

    # write style definitions
    # -----------------------

    # directory name where the styles are stored
    dirname = os.path.abspath(os.path.join(matplotlib.get_configdir(), 'stylelib'))

    # make directory if it does not yet exist
    if not os.path.isdir(dirname): os.makedirs(dirname)

    # write all styles
    for fname, style in styles.items():
        # BUG FIX: use a context manager so the file is flushed and closed deterministically
        # (the old "open(...).write(...)" leaked the file handle)
        with open(os.path.join(dirname, fname), 'w') as fh:
            fh.write(style)
# ==================================================================================================
def set_decade_lims(axis=None, direction=None):
    r'''
    Floor/ceil the axis limits to the nearest enclosing powers of ten.

    :options:

        **axis** ([``plt.gca()``] | ...)
            Specify the axis to which to apply the limits.

        **direction** ([``None``] | ``'x'`` | ``'y'``)
            Limit the application to a certain direction (default: both).
    '''
    # fall back on the current axis
    if axis is None:
        axis = plt.gca()

    def decades(lo, hi):
        # round the limits outward to full decades
        return 10 ** np.floor(np.log10(lo)), 10 ** np.ceil(np.log10(hi))

    # x-axis
    if direction is None or direction == 'x':
        lo, hi = decades(*axis.get_xlim())
        axis.set_xlim([lo, hi])

    # y-axis
    if direction is None or direction == 'y':
        lo, hi = decades(*axis.get_ylim())
        axis.set_ylim([lo, hi])
# ==================================================================================================
def scale_lim(lim, factor=1.05):
    r'''
    Widen limits symmetrically by a scale factor (default: 5% wider), to have a nice plot.

    :arguments:

        **lim** (``<list>`` | ``<str>``)
            The limits. May be a string "[...,...]", which is converted to a list.

    :options:

        **factor** ([``1.05``] | ``<float>``)
            Scale factor.
    '''
    # convert string "[...,...]"
    # NOTE(review): uses eval() — do not call with untrusted input
    if type(lim) == str:
        lim = eval(lim)

    # widen in place by (factor - 1) times the current span, split over both ends
    margin = (factor - 1.) / 2. * (lim[1] - lim[0])
    lim[0] -= margin
    lim[1] += margin

    return lim
# ==================================================================================================
def abs2rel_x(x, axis=None):
    r'''
    Transform absolute x-coordinates to relative x-coordinates. Relative coordinates correspond to
    a fraction of the relevant axis. Be sure to set the limits and scale before calling this
    function!

    :arguments:

        **x** (``float``, ``list``)
            Absolute coordinates.

    :options:

        **axis** ([``plt.gca()``] | ...)
            Specify the axis to which to apply the limits.

    :returns:

        **x** (``float``, ``list``)
            Relative coordinates.
    '''
    # get current axis
    if axis is None:
        axis = plt.gca()
    # get current limits
    xmin, xmax = axis.get_xlim()
    # transform
    # the try/except implements scalar-vs-iterable duck-typing: iterating a scalar raises,
    # falling through to the scalar branch; "None" entries are passed through unchanged
    # NOTE(review): the bare "except" also swallows unrelated errors from the comprehension
    # - log scale: interpolate in log10-space
    if axis.get_xscale() == 'log':
        try : return [(np.log10(i)-np.log10(xmin))/(np.log10(xmax)-np.log10(xmin)) if i is not None else i for i in x]
        except: return (np.log10(x)-np.log10(xmin))/(np.log10(xmax)-np.log10(xmin))
    # - normal scale: linear interpolation
    else:
        try : return [(i-xmin)/(xmax-xmin) if i is not None else i for i in x]
        except: return (x-xmin)/(xmax-xmin)
# ==================================================================================================
def abs2rel_y(y, axis=None):
    r'''
    Transform absolute y-coordinates to relative y-coordinates. Relative coordinates correspond to
    a fraction of the relevant axis. Be sure to set the limits and scale before calling this
    function!

    :arguments:

        **y** (``float``, ``list``)
            Absolute coordinates.

    :options:

        **axis** ([``plt.gca()``] | ...)
            Specify the axis to which to apply the limits.

    :returns:

        **y** (``float``, ``list``)
            Relative coordinates.
    '''
    # get current axis
    if axis is None:
        axis = plt.gca()
    # get current limits
    ymin, ymax = axis.get_ylim()
    # transform
    # the try/except implements scalar-vs-iterable duck-typing: iterating a scalar raises,
    # falling through to the scalar branch; "None" entries are passed through unchanged
    # - log scale: interpolate in log10-space
    # BUG FIX: this checked "axis.get_xscale()" (the x-axis scale), giving wrong results
    # whenever the x- and y-scales differ
    if axis.get_yscale() == 'log':
        try : return [(np.log10(i)-np.log10(ymin))/(np.log10(ymax)-np.log10(ymin)) if i is not None else i for i in y]
        except: return (np.log10(y)-np.log10(ymin))/(np.log10(ymax)-np.log10(ymin))
    # - normal scale: linear interpolation
    else:
        try : return [(i-ymin)/(ymax-ymin) if i is not None else i for i in y]
        except: return (y-ymin)/(ymax-ymin)
# ==================================================================================================
def rel2abs_x(x, axis=None):
    r'''
    Transform relative x-coordinates to absolute x-coordinates. Relative coordinates correspond to
    a fraction of the relevant axis. Be sure to set the limits and scale before calling this
    function!

    :arguments:

        **x** (``float``, ``list``)
            Relative coordinates.

    :options:

        **axis** ([``plt.gca()``] | ...)
            Specify the axis to which to apply the limits.

    :returns:

        **x** (``float``, ``list``)
            Absolute coordinates.
    '''
    # get current axis
    if axis is None:
        axis = plt.gca()
    # get current limits
    xmin, xmax = axis.get_xlim()
    # transform
    # the try/except implements scalar-vs-iterable duck-typing: iterating a scalar raises,
    # falling through to the scalar branch; "None" entries are passed through unchanged
    # NOTE(review): the bare "except" also swallows unrelated errors from the comprehension
    # - log scale: interpolate in log10-space
    if axis.get_xscale() == 'log':
        try : return [10.**(np.log10(xmin)+i*(np.log10(xmax)-np.log10(xmin))) if i is not None else i for i in x]
        except: return 10.**(np.log10(xmin)+x*(np.log10(xmax)-np.log10(xmin)))
    # - normal scale: linear interpolation
    else:
        try : return [xmin+i*(xmax-xmin) if i is not None else i for i in x]
        except: return xmin+x*(xmax-xmin)
# ==================================================================================================
def rel2abs_y(y, axis=None):
    r'''
    Transform relative y-coordinates to absolute y-coordinates. Relative coordinates correspond to
    a fraction of the relevant axis. Be sure to set the limits and scale before calling this
    function!

    :arguments:

        **y** (``float``, ``list``)
            Relative coordinates.

    :options:

        **axis** ([``plt.gca()``] | ...)
            Specify the axis to which to apply the limits.

    :returns:

        **y** (``float``, ``list``)
            Absolute coordinates.
    '''
    # get current axis
    if axis is None:
        axis = plt.gca()
    # get current limits
    ymin, ymax = axis.get_ylim()
    # transform
    # the try/except implements scalar-vs-iterable duck-typing: iterating a scalar raises,
    # falling through to the scalar branch; "None" entries are passed through unchanged
    # - log scale: interpolate in log10-space
    # BUG FIX: this checked "axis.get_xscale()" (the x-axis scale), giving wrong results
    # whenever the x- and y-scales differ
    if axis.get_yscale() == 'log':
        try : return [10.**(np.log10(ymin)+i*(np.log10(ymax)-np.log10(ymin))) if i is not None else i for i in y]
        except: return 10.**(np.log10(ymin)+y*(np.log10(ymax)-np.log10(ymin)))
    # - normal scale: linear interpolation
    else:
        try : return [ymin+i*(ymax-ymin) if i is not None else i for i in y]
        except: return ymin+y*(ymax-ymin)
# ==================================================================================================
def subplots(scale_x=None, scale_y=None, scale=None, **kwargs):
    r'''
    Run ``matplotlib.pyplot.subplots`` with ``figsize`` set to the correct multiple of the default.

    :additional options:

        **scale, scale_x, scale_y** (``<float>``)
            Scale the figure-size (along one of the dimensions).
    '''
    # an explicit figure-size wins: pass everything through untouched
    if 'figsize' in kwargs:
        return plt.subplots(**kwargs)

    # start from the default figure-size
    width, height = mpl.rcParams['figure.figsize']

    # apply the requested scaling
    if scale is not None:
        width, height = width * scale, height * scale
    if scale_x is not None:
        width *= scale_x
    if scale_y is not None:
        height *= scale_y

    # one (scaled) default-sized panel per row/column
    nrows = kwargs.pop('nrows', 1)
    ncols = kwargs.pop('ncols', 1)

    return plt.subplots(nrows=nrows, ncols=ncols, figsize=(ncols * width, nrows * height), **kwargs)
# ==================================================================================================
def plot(x, y, units='absolute', axis=None, **kwargs):
    r'''
    Plot x-y data, with coordinates in either absolute or relative (axis-fraction) units.

    :arguments:

        **x, y** (``list``)
            Coordinates.

    :options:

        **units** ([``'absolute'``] | ``'relative'``)
            The type of units in which the coordinates are specified. Relative coordinates
            correspond to a fraction of the relevant axis. If you use relative coordinates,
            be sure to set the limits and scale before calling this function!

        ...
            Any ``plt.plot(...)`` option.

    :returns:

        The handle of the ``plt.plot(...)`` command.
    '''
    # fall back on the current axis
    ax = plt.gca() if axis is None else axis

    # map axis-fraction coordinates onto the data range
    if units.lower() == 'relative':
        x = rel2abs_x(x, ax)
        y = rel2abs_y(y, ax)

    return ax.plot(x, y, **kwargs)
# ==================================================================================================
def text(x, y, text, units='absolute', axis=None, **kwargs):
    r'''
    Plot a text, with coordinates in either absolute or relative (axis-fraction) units.

    :arguments:

        **x, y** (``float``)
            Coordinates.

        **text** (``str``)
            Text to plot.

    :options:

        **units** ([``'absolute'``] | ``'relative'``)
            The type of units in which the coordinates are specified. Relative coordinates
            correspond to a fraction of the relevant axis. If you use relative coordinates,
            be sure to set the limits and scale before calling this function!

        ...
            Any ``plt.text(...)`` option.

    :returns:

        The handle of the ``plt.text(...)`` command.
    '''
    # fall back on the current axis
    ax = plt.gca() if axis is None else axis

    # map axis-fraction coordinates onto the data range
    if units.lower() == 'relative':
        x = rel2abs_x(x, ax)
        y = rel2abs_y(y, ax)

    return ax.text(x, y, text, **kwargs)
# ==================================================================================================
def diagonal_powerlaw(exp, ll=None, lr=None, tl=None, tr=None, width=None, height=None, plot=False, **kwargs):
r'''
Set the limits such that a power-law with a certain exponent lies on the diagonal.
:arguments:
**exp** (``<float>``)
The power-law exponent.
**ll, lr, tl, tr** (``<list>``)
Coordinates of the lower-left, or the lower-right, or the top-left, or the top-right corner.
**width, height** (``<float>``)
Width or the height.
:options:
**axis** ([``plt.gca()``] | ...)
Specify the axis to which to apply the limits.
**plot** ([``False``] | ``True``)
Plot the diagonal.
...
Any ``plt.plot(...)`` option.
:returns:
The handle of the ``plt.plot(...)`` command (if any).
'''
axis = kwargs.pop('axis', plt.gca())
if width and not height: width = np.log(width )
elif height and not width : height = np.log(height)
else: raise IOError('Specify "width" or "height"')
if ll and not lr and not tl and not tr: ll = [ np.log(ll[0]), np.log(ll[1]) ]
elif lr and not ll and not tl and not tr: lr = [ np.log(lr[0]), np.log(lr[1]) ]
elif tl and not lr and not ll and not tr: tl = [ np.log(tl[0]), np.log(tl[1]) ]
elif tr and not lr and not tl and not ll: tr = [ np.log(tr[0]), np.log(tr[1]) ]
else: raise IOError('Specify "ll" or "lr" or "tl" or "tr"')
axis.set_xscale('log')
axis.set_yscale('log')
if width : height = width * np.abs(exp)
elif height: width = height / np.abs(exp)
if ll:
axis.set_xlim(sorted([np.exp(ll[0]), np.exp(ll[0]+width )]))
axis.set_ylim(sorted([np.exp(ll[1]), np.exp(ll[1]+height)]))
elif lr:
axis.set_xlim(sorted([np.exp(lr[0]), np.exp(lr[0]-width )]))
axis.set_ylim(sorted([np.exp(lr[1]), np.exp(lr[1]+height)]))
elif tl:
axis.set_xlim(sorted([np.exp(tl[0]), np.exp(tl[0]+width )]))
axis.set_ylim(sorted([np.exp(tl[1]), np.exp(tl[1]-height)]))
elif tr:
axis.set_xlim(sorted([np.exp(tr[0]), np.exp(tr[0]-width )]))
axis.set_ylim(sorted([np.exp(tr[1]), np.exp(tr[1]-height)]))
if plot:
if exp > 0: return plot_powerlaw(exp, 0., 0., 1., **kwargs)
else : return plot_powerlaw(exp, 0., 1., 1., **kwargs)
# ==================================================================================================
def annotate_powerlaw(text, exp, startx, starty, width=None, rx=0.5, ry=0.5, **kwargs):
r'''
Added a label to the middle of a power-law annotation (see ``goosempl.plot_powerlaw``).
:arguments:
**exp** (``float``)
The power-law exponent.
**startx, starty** (``float``)
Start coordinates.
:options:
**width, height, endx, endy** (``float``)
Definition of the end coordinate (only on of these options is needed).
**rx, ry** (``float``)
Shift in x- and y-direction w.r.t. the default coordinates.
**units** ([``'relative'``] | ``'absolute'``)
The type of units in which the coordinates are specified. Relative coordinates correspond to a
fraction of the relevant axis. If you use relative coordinates, be sure to set the limits and
scale before calling this function!
**axis** ([``plt.gca()``] | ...)
Specify the axis to which to apply the limits.
...
Any ``plt.text(...)`` option.
:returns:
The handle of the ``plt.text(...)`` command.
'''
# get options/defaults
endx = kwargs.pop('endx' , None )
endy = kwargs.pop('endy' , None )
height = kwargs.pop('height', None )
units = kwargs.pop('units' , 'relative')
axis = kwargs.pop('axis' , plt.gca() )
# check
if axis.get_xscale() != 'log' or axis.get_yscale() != 'log':
raise IOError('This function only works on a log-log scale, where the power-law is a straight line')
# apply width/height
if width is not None:
endx = startx + width
endy = None
elif height is not None:
if exp > 0: endy = starty + height
elif exp == 0: endy = starty
else : endy = starty - height
endx = None
# transform
if units.lower() == 'relative':
[startx, endx] = rel2abs_x([startx, endx], axis)
[starty, endy] = rel2abs_y([starty, endy], axis)
# determine multiplication constant
const = starty / ( startx**exp )
# get end x/y-coordinate
if endx is not None: endy = const * endx**exp
else : endx = ( endy / const )**( 1/exp )
# middle
x = 10. ** ( np.log10(startx) + rx * ( np.log10(endx) - np.log10(startx) ) )
y = 10. ** ( np.log10(starty) + ry * ( np.log10(endy) - np.log10(starty) ) )
# plot
return axis.text(x, y, text, **kwargs)
# ==================================================================================================
def plot_powerlaw(exp, startx, starty, width=None, **kwargs):
r'''
Plot a power-law.
:arguments:
**exp** (``float``)
The power-law exponent.
**startx, starty** (``float``)
Start coordinates.
:options:
**width, height, endx, endy** (``float``)
Definition of the end coordinate (only on of these options is needed).
**units** ([``'relative'``] | ``'absolute'``)
The type of units in which the coordinates are specified. Relative coordinates correspond to a
fraction of the relevant axis. If you use relative coordinates, be sure to set the limits and
scale before calling this function!
**axis** ([``plt.gca()``] | ...)
Specify the axis to which to apply the limits.
...
Any ``plt.plot(...)`` option.
:returns:
The handle of the ``plt.plot(...)`` command.
'''
# get options/defaults
endx = kwargs.pop('endx' , None )
endy = kwargs.pop('endy' , None )
height = kwargs.pop('height', None )
units = kwargs.pop('units' , 'relative')
axis = kwargs.pop('axis' , plt.gca() )
# check
if axis.get_xscale() != 'log' or axis.get_yscale() != 'log':
raise IOError('This function only works on a log-log scale, where the power-law is a straight line')
# apply width/height
if width is not None:
endx = startx + width
endy = None
elif height is not None:
if exp > 0: endy = starty + height
elif exp == 0: endy = starty
else : endy = starty - height
endx = None
# transform
if units.lower() == 'relative':
[startx, endx] = rel2abs_x([startx, endx], axis)
[starty, endy] = rel2abs_y([starty, endy], axis)
# determine multiplication constant
const = starty / ( startx**exp )
# get end x/y-coordinate
if endx is not None: endy = const * endx**exp
else : endx = ( endy / const )**( 1/exp )
# plot
return axis.plot([startx, endx], [starty, endy], **kwargs)
# ==================================================================================================
def grid_powerlaw(exp, insert=0, skip=0, end=-1, step=0, axis=None, **kwargs):
    r'''
    Draw a power-law grid: a grid that respects a certain power-law exponent. The grid-lines start from
    the positions of the ticks.

    :arguments:

        **exp** (``float``)
            The power-law exponent.

    :options:

        **insert** (``<int>``)
            Insert extra lines in between the default lines set by the tick positions.

        **skip, end, step** (``<int>``)
            Select from the lines based on ``coor = coor[skip:end:step]``.

        **axis** ([``plt.gca()``] | ...)
            Specify the axis to which to apply the limits.

        ...
            Any ``plt.plot(...)`` option.

    :returns:

        The handle of the ``plt.plot(...)`` command.
    '''
    # default axis
    if axis is None: axis = plt.gca()
    # default plot settings
    kwargs.setdefault('color' , 'k' )
    kwargs.setdefault('linestyle', '--')
    kwargs.setdefault('linewidth', 1 )
    # check: a power-law is a straight line only on a log-log scale
    if axis.get_xscale() != 'log' or axis.get_yscale() != 'log':
        raise IOError('This function only works on a log-log scale, where the power-law is a straight line')
    # zero-exponent: draw horizontal lines
    if exp == 0:
        # y-coordinate of the start positions: the tick positions, in relative coordinates
        starty = abs2rel_y(axis.get_yticks(), axis=axis)
        # insert extra coordinates by linear interpolation between the tick positions
        if insert > 0:
            n = len(starty)
            x = np.linspace(0,1,n+(n-1)*int(insert))
            xp = np.linspace(0,1,n)
            starty = np.interp(x, xp, starty)
        # skip coordinates
        starty = starty[int(skip):int(end):int(1+step)]
        # set remaining coordinates: horizontal lines span the full (relative) x-range
        endy = starty
        startx = np.zeros((len(starty)))
        endx = np.ones ((len(starty)))
    # all other exponents
    else:
        # get the axis' size in real coordinates
        # - get the limits
        xmin, xmax = axis.get_xlim()
        ymin, ymax = axis.get_ylim()
        # - compute the size in both directions (in decades)
        deltax = np.log10(xmax) - np.log10(xmin)
        deltay = np.log10(ymax) - np.log10(ymin)
        # convert the exponent in real coordinates to an exponent in relative coordinates
        b = np.abs(exp) * deltax / deltay
        # x-coordinate of the start positions: the tick positions, in relative coordinates
        startx = abs2rel_x(axis.get_xticks(), axis=axis)
        # compute how many labels need to be prepended so the grid also covers the corner region
        Dx = startx[1] - startx[0]
        nneg = int(np.floor(1./(b*Dx))) - 1
        # add extra to be sure
        if insert > 0:
            nneg += 1
        # prepend start positions left of the axis (relative coordinates < 0)
        if nneg > 0:
            startx = np.hstack(( startx[0]+np.cumsum(-Dx * np.ones((nneg)))[::-1], startx ))
        # insert extra coordinates by linear interpolation between the tick positions
        if insert > 0:
            n = len(startx)
            x = np.linspace(0,1,n+(n-1)*int(insert))
            xp = np.linspace(0,1,n)
            startx = np.interp(x, xp, startx)
        # skip coordinates
        if step > 0:
            startx = startx[int(skip)::int(1+step)]
        # x-coordinate of the end of the lines: each line spans the full (relative) y-range
        endx = startx + 1/b
        # y-coordinate of the start and the end of the lines:
        # bottom-to-top for a positive exponent, top-to-bottom for a negative exponent
        if exp > 0:
            starty = np.zeros((len(startx)))
            endy = np.ones ((len(startx)))
        else:
            starty = np.ones ((len(startx)))
            endy = np.zeros((len(startx)))
    # convert to real coordinates
    startx = rel2abs_x(startx, axis)
    endx = rel2abs_x(endx , axis)
    starty = rel2abs_y(starty, axis)
    endy = rel2abs_y(endy , axis)
    # plot all lines with a single call (one column per line)
    lines = axis.plot(np.vstack(( startx, endx )), np.vstack(( starty, endy )), **kwargs)
    # remove excess legend labels: keep only the first line's label
    plt.setp(lines[1:], label="_")
    # return handles
    return lines
# ==================================================================================================
def histogram_bin_edges_minwidth(min_width, bins):
    r'''
    Repeatedly merge too-narrow bins with their right-neighbour until every bin is at least
    ``min_width`` wide.

    :arguments:

        **bins** (``<array_like>``)
            The bin-edges.

        **min_width** (``<float>``)
            The minimum bin width.
    '''

    # nothing to do
    if min_width is None or min_width is False:
        return bins

    while True:

        narrow = np.where(np.diff(bins) < min_width)[0]

        # all bins are wide enough: done
        if len(narrow) == 0:
            return bins

        i = narrow[0]

        if i + 1 == len(bins) - 1:
            # the last bin is too narrow: merge it into its left-neighbour
            bins = np.hstack((bins[:i], bins[-1]))
        else:
            # drop the right edge of the narrow bin
            bins = np.hstack((bins[:(i + 1)], bins[(i + 2):]))
# ==================================================================================================
def histogram_bin_edges_mincount(data, min_count, bins):
    r'''
    Repeatedly merge under-populated bins with their right-neighbour until every bin contains at
    least ``min_count`` data-points.

    :arguments:

        **data** (``<array_like>``)
            Input data. The histogram is computed over the flattened array.

        **bins** (``<array_like>`` | ``<int>``)
            The bin-edges (or the number of bins, automatically converted to equal-sized bins).

        **min_count** (``<int>``)
            The minimum number of data-points per bin.
    '''

    # nothing to do
    if min_count is None or min_count is False:
        return bins

    if type(min_count) != int:
        raise IOError('"min_count" must be an integer number')

    while True:

        count, _ = np.histogram(data, bins=bins, density=False)

        sparse = np.where(count < min_count)[0]

        # all bins are populated enough: done
        if len(sparse) == 0:
            return bins

        i = sparse[0]

        if i + 1 == len(count):
            # the last bin is under-populated: merge it into its left-neighbour
            bins = np.hstack((bins[:i], bins[-1]))
        else:
            # drop the right edge of the under-populated bin
            bins = np.hstack((bins[:(i + 1)], bins[(i + 2):]))
# ==================================================================================================
def histogram_bin_edges(data, bins=10, mode='equal', min_count=None, integer=False, remove_empty_edges=True, min_width=None):
    r'''
    Determine bin-edges.

    :arguments:

        **data** (``<array_like>``)
            Input data. The histogram is computed over the flattened array.

    :options:

        **bins** ([``10``] | ``<int>``)
            The number of bins.

        **mode** ([``'equal'`` | ``<str>``)
            Mode with which to compute the bin-edges:
            * ``'equal'``: each bin has equal width.
            * ``'log'``: logarithmic spacing.
            * ``'uniform'``: uniform number of data-points per bin.

        **min_count** (``<int>``)
            The minimum number of data-points per bin.

        **min_width** (``<float>``)
            The minimum width of each bin.

        **integer** ([``False``] | [``True``])
            If ``True``, bins not encompassing an integer are removed
            (e.g. a bin with edges ``[1.1, 1.9]`` is removed, but ``[0.9, 1.1]`` is not removed).

        **remove_empty_edges** ([``True``] | [``False``])
            Remove empty bins at the beginning or the end.

    :returns:

        **bin_edges** (``<array of dtype float>``)
            The edges to pass into histogram.
    '''
    # determine the bin-edges
    if mode == 'equal':
        bin_edges = np.linspace(np.min(data), np.max(data), bins+1)
    elif mode == 'log':
        bin_edges = np.logspace(np.log10(np.min(data)), np.log10(np.max(data)), bins+1)
    elif mode == 'uniform':
        # - check
        if hasattr(bins, "__len__"):
            raise IOError('Only the number of bins can be specified')
        # - use the minimum count to estimate the number of bins
        if min_count is not None and min_count is not False:
            if type(min_count) != int: raise IOError('"min_count" must be an integer number')
            bins = int(np.floor(float(len(data))/float(min_count)))
        # - number of data-points in each bin (equal for each)
        count = int(np.floor(float(len(data))/float(bins))) * np.ones(bins, dtype='int')
        # - increase the number of data-points by one in as many bins as needed,
        #   such that the total fits the total number of data-points
        #   (BUG FIX: "np.int" was deprecated in NumPy 1.20 and removed in 1.24;
        #   the built-in "int" is the documented replacement)
        count[np.linspace(0, bins-1, len(data)-np.sum(count)).astype(int)] += 1
        # - split the data: cumulative counts give the indices of the edges in the sorted data
        idx = np.empty((bins+1), dtype='int')
        idx[0 ] = 0
        idx[1:] = np.cumsum(count)
        idx[-1] = len(data) - 1
        # - determine the bin-edges: take them from the sorted data
        bin_edges = np.unique(np.sort(data)[idx])
    else:
        raise IOError('Unknown option')
    # remove empty starting and ending bin (related to an unfortunate choice of bin-edges)
    if remove_empty_edges:
        N, _ = np.histogram(data, bins=bin_edges, density=False)
        idx = np.min(np.where(N>0)[0])
        jdx = np.max(np.where(N>0)[0])
        bin_edges = bin_edges[(idx):(jdx+2)]
    # merge bins with too few data-points (if needed)
    bin_edges = histogram_bin_edges_mincount(data, min_count=min_count, bins=bin_edges)
    # merge bins that have too small of a width
    bin_edges = histogram_bin_edges_minwidth(min_width=min_width, bins=bin_edges)
    # select only bins that encompass an integer (and retain the original bounds)
    if integer:
        idx = np.where(np.diff(np.floor(bin_edges))>=1)[0]
        idx = np.unique(np.hstack((0, idx, len(bin_edges)-1)))
        bin_edges = bin_edges[idx]
    # return
    return bin_edges
# ==================================================================================================
def histogram(data, return_edges=True, **kwargs):
    r'''
    Compute a histogram: a thin wrapper around ``numpy.histogram``.

    See `numpy.histrogram <https://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html>`_

    :extra options:

        **return_edges** ([``True``] | [``False``])
            Return the bin edges if set to ``True``, return their midpoints otherwise.
    '''

    # delegate to NumPy
    counts, edges = np.histogram(data, **kwargs)

    if return_edges:
        return counts, edges

    # convert edges -> midpoints of each bin
    midpoints = edges[:-1] + np.diff(edges) / 2.
    return counts, midpoints
# ==================================================================================================
def histogram_cumulative(data,**kwargs):
    r'''
    Compute cumulative histogram.

    See `numpy.histrogram <https://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html>`_

    :extra options:

        **return_edges** ([``True``] | [``False``])
            Return the bin edges if set to ``True``, return their midpoints otherwise.

        **normalize** ([``False``] | ``True``)
            Normalize such that the final probability is one. In this case the function returns
            the (binned) cumulative probability density.
    '''

    return_edges = kwargs.pop('return_edges', True)
    normalize = kwargs.pop('normalize', False)

    counts, edges = np.histogram(data, **kwargs)

    # running total over the bins
    P = np.cumsum(counts)

    # scale such that the final value equals one
    if normalize:
        P = P / P[-1]

    # optionally convert edges -> midpoints of each bin
    if not return_edges:
        edges = edges[:-1] + np.diff(edges) / 2.

    return P, edges
# ==================================================================================================
# ==================================================================================================
def cdf(data,mode='continuous',**kwargs):
    '''
    Return the empirical cumulative density of the data.

    :arguments:

        **data** (``<numpy.ndarray>``)
            Input data, to plot the distribution for.

    :returns:

        **P** (``<numpy.ndarray>``)
            Cumulative probability.

        **x** (``<numpy.ndarray>``)
            Data points.
    '''

    # probabilities are uniformly spaced on [0, 1]; the data points are sorted ascending
    probabilities = np.linspace(0.0, 1.0, len(data))
    return (probabilities, np.sort(data))
# ==================================================================================================
def patch(*args,**kwargs):
    '''
    Add patches to plot. The color of the patches is indexed according to a specified color-index.

    :example:

        Plot a finite element mesh: the outline of the undeformed configuration, and the deformed
        configuration for which the elements get a color e.g. based on stress::

            import matplotlib.pyplot as plt
            import goosempl as gplt

            fig,ax = plt.subplots()

            p = gplt.patch(coor=coor+disp,conn=conn,axis=ax,cindex=stress,cmap='YlOrRd',edgecolor=None)
            _ = gplt.patch(coor=coor     ,conn=conn,axis=ax)

            cbar = fig.colorbar(p,axis=ax,aspect=10)

            plt.show()

    :arguments - option 1/2:

        **patches** (``<list>``)
            List with patch objects. Can be replaced by specifying ``coor`` and ``conn``.

    :arguments - option 2/2:

        **coor** (``<numpy.ndarray>`` | ``<list>`` (nested))
            Matrix with on each row the coordinates (positions) of each node.

        **conn** (``<numpy.ndarray>`` | ``<list>`` (nested))
            Matrix with on each row the number numbers (rows in ``coor``) which form an element (patch).

    :options:

        **cindex** (``<numpy.ndarray>``)
            Array with, for each patch, the value that should be indexed to a color.

        **axis** (``<matplotlib>``)
            Specify an axis to include to plot in. By default the current axis is used.

        **autoscale** ([``True``] | ``False``)
            Automatically update the limits of the plot (currently automatic limits of Collections are not
            supported by matplotlib).

    :recommended options:

        **cmap** (``<str>`` | ...)
            Specify a colormap.

        **linewidth** (``<float>``)
            Width of the edges.

        **edgecolor** (``<str>`` | ...)
            Color of the edges.

        **clim** (``(<float>,<float>)``)
            Lower and upper limit of the color-axis.

    :returns:

        **handle** (``<matplotlib>``)
            Handle of the patch objects.

    .. seealso::

        * `matplotlib example
          <http://matplotlib.org/examples/api/patch_collection.html>`_.
    '''

    from matplotlib.collections import PatchCollection
    from matplotlib.patches import Polygon

    # check dependent options
    # NOTE(review): this check makes the patches-only call (option 1/2 in the docstring)
    # unreachable, since "coor" and "conn" are always required -- confirm intended behavior
    if ('coor' not in kwargs or 'conn' not in kwargs):
        raise IOError('Specify both "coor" and "conn"')

    # extract local options
    axis = kwargs.pop('axis', plt.gca())
    cindex = kwargs.pop('cindex', None)
    coor = kwargs.pop('coor', None)
    conn = kwargs.pop('conn', None)
    autoscale = kwargs.pop('autoscale', True)

    # set defaults
    kwargs.setdefault('edgecolor', 'k')

    # no color-index -> set transparent
    if cindex is None:
        kwargs.setdefault('facecolor', (0., 0., 0., 0.))

    # convert mesh -> list of Polygons
    if coor is not None and conn is not None:
        poly = []
        for iconn in conn:
            poly.append(Polygon(coor[iconn, :]))
        # BUG FIX: "tuple(poly, *args)" raised a TypeError whenever extra patches were passed
        # positionally (tuple() accepts only a single iterable); concatenate the tuples instead
        args = tuple(poly) + args

    # convert patches -> matplotlib-objects
    p = PatchCollection(args, **kwargs)

    # add colors to patches
    if cindex is not None:
        p.set_array(cindex)

    # add patches to axis
    axis.add_collection(p)

    # rescale the axes manually (automatic limits of Collections are not supported)
    if autoscale:
        # - get limits
        xlim = [np.min(coor[:, 0]), np.max(coor[:, 0])]
        ylim = [np.min(coor[:, 1]), np.max(coor[:, 1])]
        # - set limits +/- 10% extra margin
        axis.set_xlim([xlim[0] - .1 * (xlim[1] - xlim[0]), xlim[1] + .1 * (xlim[1] - xlim[0])])
        axis.set_ylim([ylim[0] - .1 * (ylim[1] - ylim[0]), ylim[1] + .1 * (ylim[1] - ylim[0])])

    return p
# ==================================================================================================
|
tdegeus/GooseMPL | GooseMPL/__init__.py | cdf | python | def cdf(data,mode='continuous',**kwargs):
'''
Return cumulative density.
:arguments:
**data** (``<numpy.ndarray>``)
Input data, to plot the distribution for.
:returns:
**P** (``<numpy.ndarray>``)
Cumulative probability.
**x** (``<numpy.ndarray>``)
Data points.
'''
return ( np.linspace(0.0,1.0,len(data)), np.sort(data) ) | Return cumulative density.
:arguments:
**data** (``<numpy.ndarray>``)
Input data, to plot the distribution for.
:returns:
**P** (``<numpy.ndarray>``)
Cumulative probability.
**x** (``<numpy.ndarray>``)
Data points. | train | https://github.com/tdegeus/GooseMPL/blob/16e1e06cbcf7131ac98c03ca7251ce83734ef905/GooseMPL/__init__.py#L1133-L1151 | null | '''
This module provides some extensions to matplotlib.
:dependencies:
* numpy
* matplotlib
:copyright:
| Tom de Geus
| tom@geus.me
| http://www.geus.me
'''
# ==================================================================================================
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
import os,re,sys
# ==================================================================================================
def find_latex_font_serif():
    r'''
    Find an available font to mimic LaTeX, and return its name.
    '''

    import os, re
    import matplotlib.font_manager

    def basename(font):
        # file name without extension, with any " - variant" suffix dropped
        return os.path.splitext(os.path.split(font)[-1])[0].split(' - ')[0]

    # candidate patterns, in order of preference
    preferences = [
        r'.*Computer\ Modern\ Roman.*',
        r'.*CMU\ Serif.*',
        r'.*CMU.*',
        r'.*Times.*',
        r'.*DejaVu.*',
        r'.*Serif.*',
    ]

    available = matplotlib.font_manager.findSystemFonts(fontpaths=None, fontext='ttf')

    # return the first available font that matches, most preferred pattern first
    for pattern in preferences:
        for font in available:
            if re.match(pattern, font):
                return basename(font)

    return None
# --------------------------------------------------------------------------------------------------
def copy_style():
    r'''
    Write all goose-styles to the relevant matplotlib configuration directory, so that they can
    be loaded with ``plt.style.use(...)``.
    '''

    import os
    import matplotlib

    # style definitions
    # -----------------

    styles = {}

    styles['goose.mplstyle'] = '''
figure.figsize : 8,6
font.weight : normal
font.size : 16
axes.labelsize : medium
axes.titlesize : medium
xtick.labelsize : small
ytick.labelsize : small
xtick.top : True
ytick.right : True
axes.facecolor : none
axes.prop_cycle : cycler('color',['k', 'r', 'g', 'b', 'y', 'c', 'm'])
legend.fontsize : medium
legend.fancybox : true
legend.columnspacing : 1.0
legend.handletextpad : 0.2
lines.linewidth : 2
image.cmap : afmhot
image.interpolation : nearest
image.origin : lower
savefig.facecolor : none
figure.autolayout : True
errorbar.capsize : 2
'''

    styles['goose-tick-in.mplstyle'] = '''
xtick.direction : in
ytick.direction : in
'''

    styles['goose-tick-lower.mplstyle'] = '''
xtick.top : False
ytick.right : False
axes.spines.top : False
axes.spines.right : False
'''

    if find_latex_font_serif() is not None:
        # serif font found: point matplotlib at it (the braces of the LaTeX commands are
        # doubled here because this literal *is* passed through str.format)
        styles['goose-latex.mplstyle'] = r'''
font.family : serif
font.serif : {serif:s}
font.weight : bold
font.size : 18
text.usetex : true
text.latex.preamble : \usepackage{{amsmath}},\usepackage{{amsfonts}},\usepackage{{amssymb}},\usepackage{{bm}}
'''.format(serif=find_latex_font_serif())
    else:
        # BUG FIX: this literal is NOT passed through str.format, so the braces must not be
        # doubled; "{{amsmath}}" would end up verbatim in the style file and break the preamble
        styles['goose-latex.mplstyle'] = r'''
font.family : serif
font.weight : bold
font.size : 18
text.usetex : true
text.latex.preamble : \usepackage{amsmath},\usepackage{amsfonts},\usepackage{amssymb},\usepackage{bm}
'''

    # write style definitions
    # -----------------------

    # directory name where the styles are stored
    dirname = os.path.abspath(os.path.join(matplotlib.get_configdir(), 'stylelib'))

    # make directory if it does not yet exist
    if not os.path.isdir(dirname): os.makedirs(dirname)

    # write all styles (use a context manager so the file handles are closed)
    for fname, style in styles.items():
        with open(os.path.join(dirname, fname), 'w') as fh:
            fh.write(style)
# ==================================================================================================
def set_decade_lims(axis=None,direction=None):
    r'''
    Round the axis limits outward to whole decades.

    :options:

        **axis** ([``plt.gca()``] | ...)
            Specify the axis to which to apply the limits.

        **direction** ([``None``] | ``'x'`` | ``'y'``)
            Limit the application to a certain direction (default: both).
    '''

    # default to the current axis
    if axis is None:
        axis = plt.gca()

    def widen(lo, hi):
        # floor/ceil in log10-space, transformed back
        return 10. ** np.floor(np.log10(lo)), 10. ** np.ceil(np.log10(hi))

    # x-axis
    if direction is None or direction == 'x':
        lo, hi = widen(*axis.get_xlim())
        axis.set_xlim([lo, hi])

    # y-axis
    if direction is None or direction == 'y':
        lo, hi = widen(*axis.get_ylim())
        axis.set_ylim([lo, hi])
# ==================================================================================================
def scale_lim(lim,factor=1.05):
    r'''
    Widen limits by a factor (default: 5% wider), to have a nice plot.

    :arguments:

        **lim** (``<list>`` | ``<str>``)
            The limits. May be a string "[...,...]", which is converted to a list.

    :options:

        **factor** ([``1.05``] | ``<float>``)
            Scale factor.
    '''

    # convert string "[...,...]" to a list
    # NOTE(review): eval on arbitrary strings is unsafe if "lim" comes from untrusted input
    if type(lim) == str:
        lim = eval(lim)

    # pad both ends by half of the extra width (in place)
    pad = (factor - 1.) / 2. * (lim[1] - lim[0])
    lim[0] -= pad
    lim[1] += pad

    return lim
# ==================================================================================================
def abs2rel_x(x, axis=None):
    r'''
    Transform absolute x-coordinates to relative x-coordinates. Relative coordinates correspond to a
    fraction of the relevant axis. Be sure to set the limits and scale before calling this function!

    :arguments:

        **x** (``float``, ``list``)
            Absolute coordinates.

    :options:

        **axis** ([``plt.gca()``] | ...)
            Specify the axis to which to apply the limits.

    :returns:

        **x** (``float``, ``list``)
            Relative coordinates.
    '''

    # default to the current axis
    if axis is None:
        axis = plt.gca()

    xmin, xmax = axis.get_xlim()

    # on a log scale the fraction is measured in log10-space
    if axis.get_xscale() == 'log':
        lo = np.log10(xmin)
        span = np.log10(xmax) - np.log10(xmin)
        convert = lambda v: (np.log10(v) - lo) / span
    else:
        lo = xmin
        span = xmax - xmin
        convert = lambda v: (v - lo) / span

    # lists are converted element-by-element (None entries pass through);
    # everything else falls back to the scalar path
    try:
        return [convert(i) if i is not None else i for i in x]
    except:
        return convert(x)
# ==================================================================================================
def abs2rel_y(y, axis=None):
    r'''
    Transform absolute y-coordinates to relative y-coordinates. Relative coordinates correspond to a
    fraction of the relevant axis. Be sure to set the limits and scale before calling this function!

    :arguments:

        **y** (``float``, ``list``)
            Absolute coordinates.

    :options:

        **axis** ([``plt.gca()``] | ...)
            Specify the axis to which to apply the limits.

    :returns:

        **y** (``float``, ``list``)
            Relative coordinates.
    '''
    # get current axis
    if axis is None:
        axis = plt.gca()
    # get current limits
    ymin, ymax = axis.get_ylim()
    # transform
    # - log scale: the fraction is measured in log10-space
    #   (BUG FIX: this used to test "get_xscale"; for y-coordinates the y-scale is the
    #   relevant one)
    if axis.get_yscale() == 'log':
        # lists are converted element-by-element (None entries pass through);
        # everything else falls back to the scalar path
        try : return [(np.log10(i)-np.log10(ymin))/(np.log10(ymax)-np.log10(ymin)) if i is not None else i for i in y]
        except: return (np.log10(y)-np.log10(ymin))/(np.log10(ymax)-np.log10(ymin))
    # - normal scale
    else:
        try : return [(i-ymin)/(ymax-ymin) if i is not None else i for i in y]
        except: return (y-ymin)/(ymax-ymin)
# ==================================================================================================
def rel2abs_x(x, axis=None):
    r'''
    Transform relative x-coordinates to absolute x-coordinates. Relative coordinates correspond to a
    fraction of the relevant axis. Be sure to set the limits and scale before calling this function!

    :arguments:

        **x** (``float``, ``list``)
            Relative coordinates.

    :options:

        **axis** ([``plt.gca()``] | ...)
            Specify the axis to which to apply the limits.

    :returns:

        **x** (``float``, ``list``)
            Absolute coordinates.
    '''

    # default to the current axis
    if axis is None:
        axis = plt.gca()

    xmin, xmax = axis.get_xlim()

    # on a log scale the fraction is measured in log10-space
    if axis.get_xscale() == 'log':
        lo = np.log10(xmin)
        span = np.log10(xmax) - np.log10(xmin)
        convert = lambda v: 10. ** (lo + v * span)
    else:
        lo = xmin
        span = xmax - xmin
        convert = lambda v: lo + v * span

    # lists are converted element-by-element (None entries pass through);
    # everything else falls back to the scalar path
    try:
        return [convert(i) if i is not None else i for i in x]
    except:
        return convert(x)
# ==================================================================================================
def rel2abs_y(y, axis=None):
    r'''
    Transform relative y-coordinates to absolute y-coordinates. Relative coordinates correspond to a
    fraction of the relevant axis. Be sure to set the limits and scale before calling this function!

    :arguments:

        **y** (``float``, ``list``)
            Relative coordinates.

    :options:

        **axis** ([``plt.gca()``] | ...)
            Specify the axis to which to apply the limits.

    :returns:

        **y** (``float``, ``list``)
            Absolute coordinates.
    '''
    # get current axis
    if axis is None:
        axis = plt.gca()
    # get current limits
    ymin, ymax = axis.get_ylim()
    # transform
    # - log scale: the fraction is measured in log10-space
    #   (BUG FIX: this used to test "get_xscale"; for y-coordinates the y-scale is the
    #   relevant one)
    if axis.get_yscale() == 'log':
        # lists are converted element-by-element (None entries pass through);
        # everything else falls back to the scalar path
        try : return [10.**(np.log10(ymin)+i*(np.log10(ymax)-np.log10(ymin))) if i is not None else i for i in y]
        except: return 10.**(np.log10(ymin)+y*(np.log10(ymax)-np.log10(ymin)))
    # - normal scale
    else:
        try : return [ymin+i*(ymax-ymin) if i is not None else i for i in y]
        except: return ymin+y*(ymax-ymin)
# ==================================================================================================
def subplots(scale_x=None, scale_y=None, scale=None, **kwargs):
    r'''
    Run ``matplotlib.pyplot.subplots`` with ``figsize`` set to the correct multiple of the default.

    :additional options:

        **scale, scale_x, scale_y** (``<float>``)
            Scale the figure-size (along one of the dimensions).
    '''

    # an explicit figsize wins: plain pass-through
    if 'figsize' in kwargs:
        return plt.subplots(**kwargs)

    # start from the configured default size
    width, height = mpl.rcParams['figure.figsize']

    # apply the requested scaling
    if scale is not None:
        width, height = width * scale, height * scale
    if scale_x is not None:
        width *= scale_x
    if scale_y is not None:
        height *= scale_y

    # one (scaled) default-sized panel per row/column
    nrows = kwargs.pop('nrows', 1)
    ncols = kwargs.pop('ncols', 1)

    return plt.subplots(nrows=nrows, ncols=ncols, figsize=(ncols * width, nrows * height), **kwargs)
# ==================================================================================================
def plot(x, y, units='absolute', axis=None, **kwargs):
    r'''
    Plot data, with coordinates given in absolute or relative units.

    :arguments:

        **x, y** (``list``)
            Coordinates.

    :options:

        **units** ([``'absolute'``] | ``'relative'``)
            Unit system of the coordinates. Relative coordinates are a fraction of the relevant
            axis; be sure to set the limits and scale before calling this function in that case.

        **axis** ([``plt.gca()``] | ...)
            The axis to plot on.

        ...
            Any ``plt.plot(...)`` option.

    :returns:
        The handle of the ``plt.plot(...)`` command.
    '''

    # default to the current axis
    if axis is None:
        axis = plt.gca()

    # convert relative -> absolute coordinates
    if units.lower() == 'relative':
        x, y = rel2abs_x(x, axis), rel2abs_y(y, axis)

    return axis.plot(x, y, **kwargs)
# ==================================================================================================
def text(x, y, text, units='absolute', axis=None, **kwargs):
    r'''
    Place a text label, with coordinates given in absolute or relative units.

    :arguments:

        **x, y** (``float``)
            Coordinates.

        **text** (``str``)
            Text to plot.

    :options:

        **units** ([``'absolute'``] | ``'relative'``)
            Unit system of the coordinates. Relative coordinates are a fraction of the relevant
            axis; be sure to set the limits and scale before calling this function in that case.

        **axis** ([``plt.gca()``] | ...)
            The axis to draw on.

        ...
            Any ``plt.text(...)`` option.

    :returns:
        The handle of the ``plt.text(...)`` command.
    '''

    # default to the current axis
    if axis is None:
        axis = plt.gca()

    # convert relative -> absolute coordinates
    if units.lower() == 'relative':
        x, y = rel2abs_x(x, axis), rel2abs_y(y, axis)

    return axis.text(x, y, text, **kwargs)
# ==================================================================================================
def diagonal_powerlaw(exp, ll=None, lr=None, tl=None, tr=None, width=None, height=None, plot=False, **kwargs):
r'''
Set the limits such that a power-law with a certain exponent lies on the diagonal.
:arguments:
**exp** (``<float>``)
The power-law exponent.
**ll, lr, tl, tr** (``<list>``)
Coordinates of the lower-left, or the lower-right, or the top-left, or the top-right corner.
**width, height** (``<float>``)
Width or the height.
:options:
**axis** ([``plt.gca()``] | ...)
Specify the axis to which to apply the limits.
**plot** ([``False``] | ``True``)
Plot the diagonal.
...
Any ``plt.plot(...)`` option.
:returns:
The handle of the ``plt.plot(...)`` command (if any).
'''
axis = kwargs.pop('axis', plt.gca())
if width and not height: width = np.log(width )
elif height and not width : height = np.log(height)
else: raise IOError('Specify "width" or "height"')
if ll and not lr and not tl and not tr: ll = [ np.log(ll[0]), np.log(ll[1]) ]
elif lr and not ll and not tl and not tr: lr = [ np.log(lr[0]), np.log(lr[1]) ]
elif tl and not lr and not ll and not tr: tl = [ np.log(tl[0]), np.log(tl[1]) ]
elif tr and not lr and not tl and not ll: tr = [ np.log(tr[0]), np.log(tr[1]) ]
else: raise IOError('Specify "ll" or "lr" or "tl" or "tr"')
axis.set_xscale('log')
axis.set_yscale('log')
if width : height = width * np.abs(exp)
elif height: width = height / np.abs(exp)
if ll:
axis.set_xlim(sorted([np.exp(ll[0]), np.exp(ll[0]+width )]))
axis.set_ylim(sorted([np.exp(ll[1]), np.exp(ll[1]+height)]))
elif lr:
axis.set_xlim(sorted([np.exp(lr[0]), np.exp(lr[0]-width )]))
axis.set_ylim(sorted([np.exp(lr[1]), np.exp(lr[1]+height)]))
elif tl:
axis.set_xlim(sorted([np.exp(tl[0]), np.exp(tl[0]+width )]))
axis.set_ylim(sorted([np.exp(tl[1]), np.exp(tl[1]-height)]))
elif tr:
axis.set_xlim(sorted([np.exp(tr[0]), np.exp(tr[0]-width )]))
axis.set_ylim(sorted([np.exp(tr[1]), np.exp(tr[1]-height)]))
if plot:
if exp > 0: return plot_powerlaw(exp, 0., 0., 1., **kwargs)
else : return plot_powerlaw(exp, 0., 1., 1., **kwargs)
# ==================================================================================================
def annotate_powerlaw(text, exp, startx, starty, width=None, rx=0.5, ry=0.5, **kwargs):
    r'''
    Add a label to the middle of a power-law annotation (see ``goosempl.plot_powerlaw``).
    :arguments:
        **text** (``str``)
            The label text.
        **exp** (``float``)
            The power-law exponent.
        **startx, starty** (``float``)
            Start coordinates.
    :options:
        **width, height, endx, endy** (``float``)
            Definition of the end coordinate (only one of these options is needed).
        **rx, ry** (``float``)
            Fractional position of the label along the line, in x- and y-direction.
        **units** ([``'relative'``] | ``'absolute'``)
            The type of units in which the coordinates are specified. Relative coordinates
            correspond to a fraction of the relevant axis. If you use relative coordinates,
            be sure to set the limits and scale before calling this function!
        **axis** ([``plt.gca()``] | ...)
            Specify the axis to which to apply the limits.
        ...
            Any ``plt.text(...)`` option.
    :returns:
        The handle of the ``plt.text(...)`` command.
    '''
    # extract options; everything left in "kwargs" is forwarded to plt.text
    endx   = kwargs.pop('endx'  , None      )
    endy   = kwargs.pop('endy'  , None      )
    height = kwargs.pop('height', None      )
    units  = kwargs.pop('units' , 'relative')
    axis   = kwargs.pop('axis'  , plt.gca() )
    # a power-law is a straight line only on double-logarithmic axes
    if not (axis.get_xscale() == 'log' and axis.get_yscale() == 'log'):
        raise IOError('This function only works on a log-log scale, where the power-law is a straight line')
    # convert a width/height specification into an end coordinate
    if width is not None:
        endx = startx + width
        endy = None
    elif height is not None:
        endx = None
        if exp > 0:
            endy = starty + height
        elif exp < 0:
            endy = starty - height
        else:
            endy = starty
    # relative coordinates -> absolute coordinates
    if units.lower() == 'relative':
        startx, endx = rel2abs_x([startx, endx], axis)
        starty, endy = rel2abs_y([starty, endy], axis)
    # prefactor of the power-law "y = const * x**exp" that passes through the start point
    const = starty / (startx ** exp)
    # complete the missing end coordinate from the power-law
    if endx is None:
        endx = (endy / const) ** (1 / exp)
    else:
        endy = const * endx ** exp
    # label position: fraction (rx, ry) along the segment, interpolated in log-space
    logx = np.log10(startx) + rx * (np.log10(endx) - np.log10(startx))
    logy = np.log10(starty) + ry * (np.log10(endy) - np.log10(starty))
    # plot
    return axis.text(10. ** logx, 10. ** logy, text, **kwargs)
# ==================================================================================================
def plot_powerlaw(exp, startx, starty, width=None, **kwargs):
    r'''
    Plot a power-law.
    :arguments:
        **exp** (``float``)
            The power-law exponent.
        **startx, starty** (``float``)
            Start coordinates.
    :options:
        **width, height, endx, endy** (``float``)
            Definition of the end coordinate (only one of these options is needed).
        **units** ([``'relative'``] | ``'absolute'``)
            The type of units in which the coordinates are specified. Relative coordinates
            correspond to a fraction of the relevant axis. If you use relative coordinates,
            be sure to set the limits and scale before calling this function!
        **axis** ([``plt.gca()``] | ...)
            Specify the axis to which to apply the limits.
        ...
            Any ``plt.plot(...)`` option.
    :returns:
        The handle of the ``plt.plot(...)`` command.
    '''
    # extract options; everything left in "kwargs" is forwarded to plt.plot
    endx   = kwargs.pop('endx'  , None      )
    endy   = kwargs.pop('endy'  , None      )
    height = kwargs.pop('height', None      )
    units  = kwargs.pop('units' , 'relative')
    axis   = kwargs.pop('axis'  , plt.gca() )
    # a power-law is a straight line only on double-logarithmic axes
    if not (axis.get_xscale() == 'log' and axis.get_yscale() == 'log'):
        raise IOError('This function only works on a log-log scale, where the power-law is a straight line')
    # convert a width/height specification into an end coordinate
    if width is not None:
        endx = startx + width
        endy = None
    elif height is not None:
        endx = None
        if exp > 0:
            endy = starty + height
        elif exp < 0:
            endy = starty - height
        else:
            endy = starty
    # relative coordinates -> absolute coordinates
    if units.lower() == 'relative':
        startx, endx = rel2abs_x([startx, endx], axis)
        starty, endy = rel2abs_y([starty, endy], axis)
    # prefactor of the power-law "y = const * x**exp" that passes through the start point
    const = starty / (startx ** exp)
    # complete the missing end coordinate from the power-law
    if endx is None:
        endx = (endy / const) ** (1 / exp)
    else:
        endy = const * endx ** exp
    # plot the straight segment between start and end
    return axis.plot([startx, endx], [starty, endy], **kwargs)
# ==================================================================================================
def grid_powerlaw(exp, insert=0, skip=0, end=-1, step=0, axis=None, **kwargs):
    r'''
    Draw a power-law grid: a grid that respects a certain power-law exponent. The grid-lines start from
    the positions of the ticks.
    :arguments:
        **exp** (``float``)
            The power-law exponent.
    :options:
        **insert** (``<int>``)
            Insert extra lines in between the default lines set by the tick positions.
        **skip, end, step** (``<int>``)
            Select from the lines based on ``coor = coor[skip:end:step]``.
        **axis** ([``plt.gca()``] | ...)
            Specify the axis to which to apply the limits.
        ...
            Any ``plt.plot(...)`` option.
    :returns:
        The handles of the ``plt.plot(...)`` command (one handle per grid-line).
    '''
    # default axis
    if axis is None: axis = plt.gca()
    # default plot settings: thin, dashed, black lines
    kwargs.setdefault('color'    , 'k' )
    kwargs.setdefault('linestyle', '--')
    kwargs.setdefault('linewidth', 1   )
    # check: a power-law is a straight line only on a double-logarithmic scale
    if axis.get_xscale() != 'log' or axis.get_yscale() != 'log':
        raise IOError('This function only works on a log-log scale, where the power-law is a straight line')
    # zero-exponent: draw horizontal lines
    if exp == 0:
        # y-coordinate of the start positions (tick positions, in relative [0, 1] coordinates)
        starty = abs2rel_y(axis.get_yticks(), axis=axis)
        # insert extra coordinates: linear interpolation adds "insert" lines between each pair
        if insert > 0:
            n      = len(starty)
            x      = np.linspace(0, 1, n + (n - 1) * int(insert))
            xp     = np.linspace(0, 1, n)
            starty = np.interp(x, xp, starty)
        # skip coordinates
        # NOTE: slice-step is "1 + step", so the default step=0 keeps every line
        starty = starty[int(skip):int(end):int(1 + step)]
        # set remaining coordinates: horizontal lines span the full relative x-range [0, 1]
        endy   = starty
        startx = np.zeros((len(starty)))
        endx   = np.ones ((len(starty)))
    # all other exponents
    else:
        # get the axis' size in real coordinates
        # - get the limits
        xmin, xmax = axis.get_xlim()
        ymin, ymax = axis.get_ylim()
        # - compute the size in both directions (in decades)
        deltax = np.log10(xmax) - np.log10(xmin)
        deltay = np.log10(ymax) - np.log10(ymin)
        # convert the exponent in real coordinates to an exponent in relative coordinates
        b = np.abs(exp) * deltax / deltay
        # x-coordinate of the start positions (tick positions, in relative [0, 1] coordinates)
        startx = abs2rel_x(axis.get_xticks(), axis=axis)
        # compute how many labels need to be prepended
        # (lines starting left of the axis still cross it, since each line spans 1/b in x)
        Dx   = startx[1] - startx[0]
        nneg = int(np.floor(1. / (b * Dx))) - 1
        # add extra to be sure
        if insert > 0:
            nneg += 1
        # prepend: extend "startx" to the left with equally spaced coordinates
        if nneg > 0:
            startx = np.hstack(( startx[0] + np.cumsum(-Dx * np.ones((nneg)))[::-1], startx ))
        # insert extra coordinates: linear interpolation adds "insert" lines between each pair
        if insert > 0:
            n      = len(startx)
            x      = np.linspace(0, 1, n + (n - 1) * int(insert))
            xp     = np.linspace(0, 1, n)
            startx = np.interp(x, xp, startx)
        # skip coordinates
        # NOTE(review): unlike the "exp == 0" branch, "skip" and "end" are only honoured
        # when step > 0 here (and "end" not at all) -- possibly unintended; confirm
        if step > 0:
            startx = startx[int(skip)::int(1 + step)]
        # x-coordinate of the end of the lines (each line spans 1/b in relative x)
        endx = startx + 1 / b
        # y-coordinate of the start and the end of the lines:
        # rising lines for positive exponents, falling lines for negative exponents
        if exp > 0:
            starty = np.zeros((len(startx)))
            endy   = np.ones ((len(startx)))
        else:
            starty = np.ones ((len(startx)))
            endy   = np.zeros((len(startx)))
    # convert to real coordinates
    startx = rel2abs_x(startx, axis)
    endx   = rel2abs_x(endx  , axis)
    starty = rel2abs_y(starty, axis)
    endy   = rel2abs_y(endy  , axis)
    # plot: one column per line -> matplotlib draws each column as a separate line
    lines = axis.plot(np.vstack(( startx, endx )), np.vstack(( starty, endy )), **kwargs)
    # remove access in labels: keep only the first line's label, hide the rest from the legend
    plt.setp(lines[1:], label="_")
    # return handles
    return lines
# ==================================================================================================
def histogram_bin_edges_minwidth(min_width, bins):
    r'''
    Merge bins with their right-neighbour until every bin is at least ``min_width`` wide.
    :arguments:
        **bins** (``<array_like>``)
            The bin-edges.
        **min_width** (``<float>``)
            The minimum bin width.
    '''
    # nothing to enforce: return the input unchanged
    if min_width is None or min_width is False:
        return bins
    # repeatedly merge the first too-narrow bin until all bins are wide enough
    while True:
        narrow = np.where(np.diff(bins) < min_width)[0]
        if len(narrow) == 0:
            return bins
        i = narrow[0]
        if i + 1 == len(bins) - 1:
            # the last bin is too narrow: merge it into its left neighbour
            bins = np.hstack(( bins[:i], bins[-1] ))
        else:
            # drop the right edge of bin "i", merging it with the bin to its right
            bins = np.hstack(( bins[:(i + 1)], bins[(i + 2):] ))
# ==================================================================================================
def histogram_bin_edges_mincount(data, min_count, bins):
    r'''
    Merge bins with their right-neighbour until every bin holds a minimum number of data-points.
    :arguments:
        **data** (``<array_like>``)
            Input data. The histogram is computed over the flattened array.
        **bins** (``<array_like>`` | ``<int>``)
            The bin-edges (or the number of bins, automatically converted to equal-sized bins).
        **min_count** (``<int>``)
            The minimum number of data-points per bin.
    '''
    # nothing to enforce: return the input unchanged
    if min_count is None or min_count is False:
        return bins
    # check
    if type(min_count) != int:
        raise IOError('"min_count" must be an integer number')
    # repeatedly merge the first too-sparse bin until all bins hold enough data-points
    while True:
        count, _ = np.histogram(data, bins=bins, density=False)
        sparse   = np.where(count < min_count)[0]
        if len(sparse) == 0:
            return bins
        i = sparse[0]
        if i + 1 == len(count):
            # the last bin is too sparse: merge it into its left neighbour
            bins = np.hstack(( bins[:i], bins[-1] ))
        else:
            # drop the right edge of bin "i", merging it with the bin to its right
            bins = np.hstack(( bins[:(i + 1)], bins[(i + 2):] ))
# ==================================================================================================
def histogram_bin_edges(data, bins=10, mode='equal', min_count=None, integer=False, remove_empty_edges=True, min_width=None):
    r'''
    Determine bin-edges.
    :arguments:
        **data** (``<array_like>``)
            Input data. The histogram is computed over the flattened array.
    :options:
        **bins** ([``10``] | ``<int>``)
            The number of bins.
        **mode** ([``'equal'``] | ``<str>``)
            Mode with which to compute the bin-edges:
            * ``'equal'``: each bin has equal width.
            * ``'log'``: logarithmic spacing.
            * ``'uniform'``: uniform number of data-points per bin.
        **min_count** (``<int>``)
            The minimum number of data-points per bin.
        **min_width** (``<float>``)
            The minimum width of each bin.
        **integer** ([``False``] | [``True``])
            If ``True``, bins not encompassing an integer are removed
            (e.g. a bin with edges ``[1.1, 1.9]`` is removed, but ``[0.9, 1.1]`` is not removed).
        **remove_empty_edges** ([``True``] | [``False``])
            Remove empty bins at the beginning or the end.
    :returns:
        **bin_edges** (``<array of dtype float>``)
            The edges to pass into histogram.
    '''
    # determine the bin-edges
    if mode == 'equal':
        bin_edges = np.linspace(np.min(data), np.max(data), bins + 1)
    elif mode == 'log':
        bin_edges = np.logspace(np.log10(np.min(data)), np.log10(np.max(data)), bins + 1)
    elif mode == 'uniform':
        # - check: this mode only supports the number of bins, not explicit edges
        if hasattr(bins, "__len__"):
            raise IOError('Only the number of bins can be specified')
        # - use the minimum count to estimate the number of bins
        if min_count is not None and min_count is not False:
            if type(min_count) != int: raise IOError('"min_count" must be an integer number')
            bins = int(np.floor(float(len(data)) / float(min_count)))
        # - number of data-points in each bin (equal for each)
        count = int(np.floor(float(len(data)) / float(bins))) * np.ones(bins, dtype='int')
        # - increase the number of data-points by one in as many bins as needed,
        #   such that the total fits the total number of data-points
        #   (use the builtin "int": the "np.int" alias was removed in NumPy >= 1.24)
        count[np.linspace(0, bins - 1, len(data) - np.sum(count)).astype(int)] += 1
        # - split the data: cumulative counts give the index of each bin's first data-point
        idx = np.empty((bins + 1), dtype='int')
        idx[0 ] = 0
        idx[1:] = np.cumsum(count)
        idx[-1] = len(data) - 1
        # - determine the bin-edges from the sorted data ("unique" drops duplicate edges)
        bin_edges = np.unique(np.sort(data)[idx])
    else:
        raise IOError('Unknown option')
    # remove empty starting and ending bin (related to an unfortunate choice of bin-edges)
    if remove_empty_edges:
        N, _ = np.histogram(data, bins=bin_edges, density=False)
        idx  = np.min(np.where(N > 0)[0])
        jdx  = np.max(np.where(N > 0)[0])
        bin_edges = bin_edges[(idx):(jdx + 2)]
    # merge bins with too few data-points (if needed)
    bin_edges = histogram_bin_edges_mincount(data, min_count=min_count, bins=bin_edges)
    # merge bins that have too small of a width
    bin_edges = histogram_bin_edges_minwidth(min_width=min_width, bins=bin_edges)
    # select only bins that encompass an integer (and retain the original bounds)
    if integer:
        idx = np.where(np.diff(np.floor(bin_edges)) >= 1)[0]
        idx = np.unique(np.hstack((0, idx, len(bin_edges) - 1)))
        bin_edges = bin_edges[idx]
    # return
    return bin_edges
# ==================================================================================================
def histogram(data, return_edges=True, **kwargs):
    r'''
    Compute a histogram.
    See `numpy.histogram <https://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html>`_
    :extra options:
        **return_edges** ([``True``] | [``False``])
            Return the bin edges if set to ``True``, return their midpoints otherwise.
    '''
    # delegate the actual binning to NumPy
    counts, edges = np.histogram(data, **kwargs)
    # default: return the counts together with the bin-edges
    if return_edges:
        return counts, edges
    # otherwise: return the midpoint of each bin (left edge plus half the bin width)
    midpoints = edges[:-1] + np.diff(edges) / 2.
    return counts, midpoints
# ==================================================================================================
def histogram_cumulative(data, **kwargs):
    r'''
    Compute a cumulative histogram.
    See `numpy.histogram <https://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html>`_
    :extra options:
        **return_edges** ([``True``] | [``False``])
            Return the bin edges if set to ``True``, return their midpoints otherwise.
        **normalize** ([``False``] | ``True``)
            Normalize such that the final probability is one. In this case the function returns the
            (binned) cumulative probability density.
    '''
    # extract the extra options; everything left is forwarded to np.histogram
    return_edges = kwargs.pop('return_edges', True)
    normalize    = kwargs.pop('normalize'   , False)
    counts, edges = np.histogram(data, **kwargs)
    # accumulate the bin counts
    cumulative = np.cumsum(counts)
    # normalize such that the last value equals one
    if normalize:
        cumulative = cumulative / cumulative[-1]
    # return with edges, or with the bin midpoints
    if return_edges:
        return cumulative, edges
    return cumulative, edges[:-1] + np.diff(edges) / 2.
# ==================================================================================================
def hist(P, edges, **kwargs):
    r'''
    Plot a histogram: one rectangular patch per bin.
    :arguments:
        **P** (``<array_like>``)
            Bin counts (or probabilities).
        **edges** (``<array_like>``)
            The bin-edges.
    :options:
        **axis** ([``plt.gca()``] | ...)
            Specify the axis to plot in.
        **cindex** (``<numpy.ndarray>``)
            Array with, for each patch, the value that should be indexed to a color.
        **autoscale** ([``True``] | ``False``)
            Automatically update the limits of the plot.
        ...
            Any ``PatchCollection`` option.
    :returns:
        The handle of the ``PatchCollection``.
    '''
    from matplotlib.collections import PatchCollection
    from matplotlib.patches import Polygon
    # extract local options; everything left is forwarded to PatchCollection
    axis      = kwargs.pop('axis'     , plt.gca())
    cindex    = kwargs.pop('cindex'   , None     )
    autoscale = kwargs.pop('autoscale', True     )
    # set defaults
    kwargs.setdefault('edgecolor', 'k')
    # without a color-index the faces are drawn fully transparent
    if cindex is None:
        kwargs.setdefault('facecolor', (0., 0., 0., 0.))
    # one rectangle per bin: (left, 0) -> (right, 0) -> (right, P) -> (left, P)
    poly = [
        Polygon(np.array([[xl, 0.], [xu, 0.], [xu, p], [xl, p]]))
        for p, xl, xu in zip(P, edges[:-1], edges[1:])
    ]
    # convert patches -> matplotlib-objects
    collection = PatchCollection(poly, **kwargs)
    # add colors to patches
    if cindex is not None:
        collection.set_array(cindex)
    # add patches to axis
    axis.add_collection(collection)
    # rescale the axes manually (automatic limits of Collections are not supported by matplotlib)
    if autoscale:
        # - get limits
        xlim = [edges[0], edges[-1]]
        ylim = [0, np.max(P)]
        # - set limits +/- 10% extra margin
        dx = .1 * (xlim[1] - xlim[0])
        dy = .1 * (ylim[1] - ylim[0])
        axis.set_xlim([xlim[0] - dx, xlim[1] + dx])
        axis.set_ylim([ylim[0] - dy, ylim[1] + dy])
    return collection
# ==================================================================================================
# ==================================================================================================
def patch(*args, **kwargs):
    '''
    Add patches to plot. The color of the patches is indexed according to a specified color-index.
    :example:
        Plot a finite element mesh: the outline of the undeformed configuration, and the deformed
        configuration for which the elements get a color e.g. based on stress::
            import matplotlib.pyplot as plt
            import goosempl as gplt
            fig,ax = plt.subplots()
            p = gplt.patch(coor=coor+disp,conn=conn,axis=ax,cindex=stress,cmap='YlOrRd',edgecolor=None)
            _ = gplt.patch(coor=coor ,conn=conn,axis=ax)
            cbar = fig.colorbar(p,axis=ax,aspect=10)
            plt.show()
    :arguments - option 1/2:
        **patches** (``<list>``)
            List with patch objects. Can be replaced by specifying ``coor`` and ``conn``.
    :arguments - option 2/2:
        **coor** (``<numpy.ndarray>`` | ``<list>`` (nested))
            Matrix with on each row the coordinates (positions) of each node.
        **conn** (``<numpy.ndarray>`` | ``<list>`` (nested))
            Matrix with on each row the node numbers (rows in ``coor``) which form an element (patch).
    :options:
        **cindex** (``<numpy.ndarray>``)
            Array with, for each patch, the value that should be indexed to a color.
        **axis** (``<matplotlib>``)
            Specify an axis to include to plot in. By default the current axis is used.
        **autoscale** ([``True``] | ``False``)
            Automatically update the limits of the plot (currently automatic limits of Collections are
            not supported by matplotlib). Only effective when ``coor`` is specified.
    :recommended options:
        **cmap** (``<str>`` | ...)
            Specify a colormap.
        **linewidth** (``<float>``)
            Width of the edges.
        **edgecolor** (``<str>`` | ...)
            Color of the edges.
        **clim** (``(<float>,<float>)``)
            Lower and upper limit of the color-axis.
    :returns:
        **handle** (``<matplotlib>``)
            Handle of the patch objects.
    .. seealso::
        * `matplotlib example
          <http://matplotlib.org/examples/api/patch_collection.html>`_.
    '''
    from matplotlib.collections import PatchCollection
    from matplotlib.patches import Polygon
    # check input: either a list of patches is given positionally (option 1),
    # or a mesh is specified through "coor" and "conn" (option 2)
    # (the old code required "coor"/"conn" unconditionally, making option 1 unreachable)
    if len(args) == 0 and ('coor' not in kwargs or 'conn' not in kwargs):
        raise IOError('Specify a list of patches, or both "coor" and "conn"')
    # extract local options; everything left is forwarded to PatchCollection
    axis      = kwargs.pop('axis'     , plt.gca())
    cindex    = kwargs.pop('cindex'   , None     )
    coor      = kwargs.pop('coor'     , None     )
    conn      = kwargs.pop('conn'     , None     )
    autoscale = kwargs.pop('autoscale', True     )
    # set defaults
    kwargs.setdefault('edgecolor', 'k')
    # no color-index -> set transparent
    if cindex is None:
        kwargs.setdefault('facecolor', (0., 0., 0., 0.))
    # convert mesh -> list of Polygons (option 2)
    if coor is not None and conn is not None:
        # allow nested lists as input: indexing below needs an array
        coor = np.asarray(coor)
        patches = [Polygon(coor[iconn, :]) for iconn in conn]
    # otherwise use the patches passed positionally (option 1)
    # (the old code did "tuple(poly, *args)", which is a TypeError: tuple() takes one argument)
    else:
        patches = args[0]
    # convert patches -> matplotlib-objects
    p = PatchCollection(patches, **kwargs)
    # add colors to patches
    if cindex is not None:
        p.set_array(cindex)
    # add patches to axis
    axis.add_collection(p)
    # rescale the axes manually; only possible when the nodal coordinates are known
    # (the old code dereferenced "coor" unconditionally, crashing in the patches path)
    if autoscale and coor is not None:
        # - get limits
        xlim = [np.min(coor[:, 0]), np.max(coor[:, 0])]
        ylim = [np.min(coor[:, 1]), np.max(coor[:, 1])]
        # - set limits +/- 10% extra margin
        axis.set_xlim([xlim[0] - .1 * (xlim[1] - xlim[0]), xlim[1] + .1 * (xlim[1] - xlim[0])])
        axis.set_ylim([ylim[0] - .1 * (ylim[1] - ylim[0]), ylim[1] + .1 * (ylim[1] - ylim[0])])
    return p
# ==================================================================================================
|
tdegeus/GooseMPL | GooseMPL/__init__.py | patch | python | def patch(*args,**kwargs):
'''
Add patches to plot. The color of the patches is indexed according to a specified color-index.
:example:
Plot a finite element mesh: the outline of the undeformed configuration, and the deformed
configuration for which the elements get a color e.g. based on stress::
import matplotlib.pyplot as plt
import goosempl as gplt
fig,ax = plt.subplots()
p = gplt.patch(coor=coor+disp,conn=conn,axis=ax,cindex=stress,cmap='YlOrRd',edgecolor=None)
_ = gplt.patch(coor=coor ,conn=conn,axis=ax)
cbar = fig.colorbar(p,axis=ax,aspect=10)
plt.show()
:arguments - option 1/2:
**patches** (``<list>``)
List with patch objects. Can be replaced by specifying ``coor`` and ``conn``.
:arguments - option 2/2:
**coor** (``<numpy.ndarray>`` | ``<list>`` (nested))
Matrix with on each row the coordinates (positions) of each node.
**conn** (``<numpy.ndarray>`` | ``<list>`` (nested))
Matrix with on each row the number numbers (rows in ``coor``) which form an element (patch).
:options:
**cindex** (``<numpy.ndarray>``)
Array with, for each patch, the value that should be indexed to a color.
**axis** (``<matplotlib>``)
Specify an axis to include to plot in. By default the current axis is used.
**autoscale** ([``True``] | ``False``)
Automatically update the limits of the plot (currently automatic limits of Collections are not
supported by matplotlib).
:recommended options:
**cmap** (``<str>`` | ...)
Specify a colormap.
**linewidth** (``<float>``)
Width of the edges.
**edgecolor** (``<str>`` | ...)
Color of the edges.
**clim** (``(<float>,<float>)``)
Lower and upper limit of the color-axis.
:returns:
**handle** (``<matplotlib>``)
Handle of the patch objects.
.. seealso::
* `matplotlib example
<http://matplotlib.org/examples/api/patch_collection.html>`_.
'''
from matplotlib.collections import PatchCollection
from matplotlib.patches import Polygon
# check dependent options
if ( 'coor' not in kwargs or 'conn' not in kwargs ):
raise IOError('Specify both "coor" and "conn"')
# extract local options
axis = kwargs.pop( 'axis' , plt.gca() )
cindex = kwargs.pop( 'cindex' , None )
coor = kwargs.pop( 'coor' , None )
conn = kwargs.pop( 'conn' , None )
autoscale = kwargs.pop( 'autoscale' , True )
# set defaults
kwargs.setdefault('edgecolor','k')
# no color-index -> set transparent
if cindex is None:
kwargs.setdefault('facecolor',(0.,0.,0.,0.))
# convert mesh -> list of Polygons
if coor is not None and conn is not None:
poly = []
for iconn in conn:
poly.append(Polygon(coor[iconn,:]))
args = tuple(poly, *args)
# convert patches -> matplotlib-objects
p = PatchCollection(args,**kwargs)
# add colors to patches
if cindex is not None:
p.set_array(cindex)
# add patches to axis
axis.add_collection(p)
# rescale the axes manually
if autoscale:
# - get limits
xlim = [ np.min(coor[:,0]) , np.max(coor[:,0]) ]
ylim = [ np.min(coor[:,1]) , np.max(coor[:,1]) ]
# - set limits +/- 10% extra margin
axis.set_xlim([xlim[0]-.1*(xlim[1]-xlim[0]),xlim[1]+.1*(xlim[1]-xlim[0])])
axis.set_ylim([ylim[0]-.1*(ylim[1]-ylim[0]),ylim[1]+.1*(ylim[1]-ylim[0])])
return p | Add patches to plot. The color of the patches is indexed according to a specified color-index.
:example:
Plot a finite element mesh: the outline of the undeformed configuration, and the deformed
configuration for which the elements get a color e.g. based on stress::
import matplotlib.pyplot as plt
import goosempl as gplt
fig,ax = plt.subplots()
p = gplt.patch(coor=coor+disp,conn=conn,axis=ax,cindex=stress,cmap='YlOrRd',edgecolor=None)
_ = gplt.patch(coor=coor ,conn=conn,axis=ax)
cbar = fig.colorbar(p,axis=ax,aspect=10)
plt.show()
:arguments - option 1/2:
**patches** (``<list>``)
List with patch objects. Can be replaced by specifying ``coor`` and ``conn``.
:arguments - option 2/2:
**coor** (``<numpy.ndarray>`` | ``<list>`` (nested))
Matrix with on each row the coordinates (positions) of each node.
**conn** (``<numpy.ndarray>`` | ``<list>`` (nested))
Matrix with on each row the number numbers (rows in ``coor``) which form an element (patch).
:options:
**cindex** (``<numpy.ndarray>``)
Array with, for each patch, the value that should be indexed to a color.
**axis** (``<matplotlib>``)
Specify an axis to include to plot in. By default the current axis is used.
**autoscale** ([``True``] | ``False``)
Automatically update the limits of the plot (currently automatic limits of Collections are not
supported by matplotlib).
:recommended options:
**cmap** (``<str>`` | ...)
Specify a colormap.
**linewidth** (``<float>``)
Width of the edges.
**edgecolor** (``<str>`` | ...)
Color of the edges.
**clim** (``(<float>,<float>)``)
Lower and upper limit of the color-axis.
:returns:
**handle** (``<matplotlib>``)
Handle of the patch objects.
.. seealso::
* `matplotlib example
<http://matplotlib.org/examples/api/patch_collection.html>`_. | train | https://github.com/tdegeus/GooseMPL/blob/16e1e06cbcf7131ac98c03ca7251ce83734ef905/GooseMPL/__init__.py#L1155-L1271 | null | '''
This module provides some extensions to matplotlib.
:dependencies:
* numpy
* matplotlib
:copyright:
| Tom de Geus
| tom@geus.me
| http://www.geus.me
'''
# ==================================================================================================
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
import os,re,sys
# ==================================================================================================
def find_latex_font_serif():
r'''
Find an available font to mimic LaTeX, and return its name.
'''
import os, re
import matplotlib.font_manager
name = lambda font: os.path.splitext(os.path.split(font)[-1])[0].split(' - ')[0]
fonts = matplotlib.font_manager.findSystemFonts(fontpaths=None, fontext='ttf')
matches = [
r'.*Computer\ Modern\ Roman.*',
r'.*CMU\ Serif.*',
r'.*CMU.*',
r'.*Times.*',
r'.*DejaVu.*',
r'.*Serif.*',
]
for match in matches:
for font in fonts:
if re.match(match,font):
return name(font)
return None
# --------------------------------------------------------------------------------------------------
def copy_style():
r'''
Write all goose-styles to the relevant matplotlib configuration directory.
'''
import os
import matplotlib
# style definitions
# -----------------
styles = {}
styles['goose.mplstyle'] = '''
figure.figsize : 8,6
font.weight : normal
font.size : 16
axes.labelsize : medium
axes.titlesize : medium
xtick.labelsize : small
ytick.labelsize : small
xtick.top : True
ytick.right : True
axes.facecolor : none
axes.prop_cycle : cycler('color',['k', 'r', 'g', 'b', 'y', 'c', 'm'])
legend.fontsize : medium
legend.fancybox : true
legend.columnspacing : 1.0
legend.handletextpad : 0.2
lines.linewidth : 2
image.cmap : afmhot
image.interpolation : nearest
image.origin : lower
savefig.facecolor : none
figure.autolayout : True
errorbar.capsize : 2
'''
styles['goose-tick-in.mplstyle'] = '''
xtick.direction : in
ytick.direction : in
'''
styles['goose-tick-lower.mplstyle'] = '''
xtick.top : False
ytick.right : False
axes.spines.top : False
axes.spines.right : False
'''
if find_latex_font_serif() is not None:
styles['goose-latex.mplstyle'] = r'''
font.family : serif
font.serif : {serif:s}
font.weight : bold
font.size : 18
text.usetex : true
text.latex.preamble : \usepackage{{amsmath}},\usepackage{{amsfonts}},\usepackage{{amssymb}},\usepackage{{bm}}
'''.format(serif=find_latex_font_serif())
else:
styles['goose-latex.mplstyle'] = r'''
font.family : serif
font.weight : bold
font.size : 18
text.usetex : true
text.latex.preamble : \usepackage{{amsmath}},\usepackage{{amsfonts}},\usepackage{{amssymb}},\usepackage{{bm}}
'''
# write style definitions
# -----------------------
# directory name where the styles are stored
dirname = os.path.abspath(os.path.join(matplotlib.get_configdir(), 'stylelib'))
# make directory if it does not yet exist
if not os.path.isdir(dirname): os.makedirs(dirname)
# write all styles
for fname, style in styles.items():
open(os.path.join(dirname, fname),'w').write(style)
# ==================================================================================================
def set_decade_lims(axis=None,direction=None):
r'''
Set limits the the floor/ceil values in terms of decades.
:options:
**axis** ([``plt.gca()``] | ...)
Specify the axis to which to apply the limits.
**direction** ([``None``] | ``'x'`` | ``'y'``)
Limit the application to a certain direction (default: both).
'''
# get current axis
if axis is None:
axis = plt.gca()
# x-axis
if direction is None or direction == 'x':
# - get current limits
MIN,MAX = axis.get_xlim()
# - floor/ceil to full decades
MIN = 10 ** ( np.floor(np.log10(MIN)) )
MAX = 10 ** ( np.ceil (np.log10(MAX)) )
# - apply
axis.set_xlim([MIN,MAX])
# y-axis
if direction is None or direction == 'y':
# - get current limits
MIN,MAX = axis.get_ylim()
# - floor/ceil to full decades
MIN = 10 ** ( np.floor(np.log10(MIN)) )
MAX = 10 ** ( np.ceil (np.log10(MAX)) )
# - apply
axis.set_ylim([MIN,MAX])
# ==================================================================================================
def scale_lim(lim,factor=1.05):
r'''
Scale limits to be 5% wider, to have a nice plot.
:arguments:
**lim** (``<list>`` | ``<str>``)
The limits. May be a string "[...,...]", which is converted to a list.
:options:
**factor** ([``1.05``] | ``<float>``)
Scale factor.
'''
# convert string "[...,...]"
if type(lim) == str: lim = eval(lim)
# scale limits
D = lim[1] - lim[0]
lim[0] -= (factor-1.)/2. * D
lim[1] += (factor-1.)/2. * D
return lim
# ==================================================================================================
def abs2rel_x(x, axis=None):
r'''
Transform absolute x-coordinates to relative x-coordinates. Relative coordinates correspond to a
fraction of the relevant axis. Be sure to set the limits and scale before calling this function!
:arguments:
**x** (``float``, ``list``)
Absolute coordinates.
:options:
**axis** ([``plt.gca()``] | ...)
Specify the axis to which to apply the limits.
:returns:
**x** (``float``, ``list``)
Relative coordinates.
'''
# get current axis
if axis is None:
axis = plt.gca()
# get current limits
xmin, xmax = axis.get_xlim()
# transform
# - log scale
if axis.get_xscale() == 'log':
try : return [(np.log10(i)-np.log10(xmin))/(np.log10(xmax)-np.log10(xmin)) if i is not None else i for i in x]
except: return (np.log10(x)-np.log10(xmin))/(np.log10(xmax)-np.log10(xmin))
# - normal scale
else:
try : return [(i-xmin)/(xmax-xmin) if i is not None else i for i in x]
except: return (x-xmin)/(xmax-xmin)
# ==================================================================================================
def abs2rel_y(y, axis=None):
r'''
Transform absolute y-coordinates to relative y-coordinates. Relative coordinates correspond to a
fraction of the relevant axis. Be sure to set the limits and scale before calling this function!
:arguments:
**y** (``float``, ``list``)
Absolute coordinates.
:options:
**axis** ([``plt.gca()``] | ...)
Specify the axis to which to apply the limits.
:returns:
**y** (``float``, ``list``)
Relative coordinates.
'''
# get current axis
if axis is None:
axis = plt.gca()
# get current limits
ymin, ymax = axis.get_ylim()
# transform
# - log scale
if axis.get_xscale() == 'log':
try : return [(np.log10(i)-np.log10(ymin))/(np.log10(ymax)-np.log10(ymin)) if i is not None else i for i in y]
except: return (np.log10(y)-np.log10(ymin))/(np.log10(ymax)-np.log10(ymin))
# - normal scale
else:
try : return [(i-ymin)/(ymax-ymin) if i is not None else i for i in y]
except: return (y-ymin)/(ymax-ymin)
# ==================================================================================================
def rel2abs_x(x, axis=None):
r'''
Transform relative x-coordinates to absolute x-coordinates. Relative coordinates correspond to a
fraction of the relevant axis. Be sure to set the limits and scale before calling this function!
:arguments:
**x** (``float``, ``list``)
Relative coordinates.
:options:
**axis** ([``plt.gca()``] | ...)
Specify the axis to which to apply the limits.
:returns:
**x** (``float``, ``list``)
Absolute coordinates.
'''
# get current axis
if axis is None:
axis = plt.gca()
# get current limits
xmin, xmax = axis.get_xlim()
# transform
# - log scale
if axis.get_xscale() == 'log':
try : return [10.**(np.log10(xmin)+i*(np.log10(xmax)-np.log10(xmin))) if i is not None else i for i in x]
except: return 10.**(np.log10(xmin)+x*(np.log10(xmax)-np.log10(xmin)))
# - normal scale
else:
try : return [xmin+i*(xmax-xmin) if i is not None else i for i in x]
except: return xmin+x*(xmax-xmin)
# ==================================================================================================
def rel2abs_y(y, axis=None):
r'''
Transform relative y-coordinates to absolute y-coordinates. Relative coordinates correspond to a
fraction of the relevant axis. Be sure to set the limits and scale before calling this function!
:arguments:
**y** (``float``, ``list``)
Relative coordinates.
:options:
**axis** ([``plt.gca()``] | ...)
Specify the axis to which to apply the limits.
:returns:
**y** (``float``, ``list``)
Absolute coordinates.
'''
# get current axis
if axis is None:
axis = plt.gca()
# get current limits
ymin, ymax = axis.get_ylim()
# transform
# - log scale
if axis.get_xscale() == 'log':
try : return [10.**(np.log10(ymin)+i*(np.log10(ymax)-np.log10(ymin))) if i is not None else i for i in y]
except: return 10.**(np.log10(ymin)+y*(np.log10(ymax)-np.log10(ymin)))
# - normal scale
else:
try : return [ymin+i*(ymax-ymin) if i is not None else i for i in y]
except: return ymin+y*(ymax-ymin)
# ==================================================================================================
def subplots(scale_x=None, scale_y=None, scale=None, **kwargs):
r'''
Run ``matplotlib.pyplot.subplots`` with ``figsize`` set to the correct multiple of the default.
:additional options:
**scale, scale_x, scale_y** (``<float>``)
Scale the figure-size (along one of the dimensions).
'''
if 'figsize' in kwargs: return plt.subplots(**kwargs)
width, height = mpl.rcParams['figure.figsize']
if scale is not None:
width *= scale
height *= scale
if scale_x is not None:
width *= scale_x
if scale_y is not None:
height *= scale_y
nrows = kwargs.pop('nrows', 1)
ncols = kwargs.pop('ncols', 1)
width = ncols * width
height = nrows * height
return plt.subplots(nrows=nrows, ncols=ncols, figsize=(width,height), **kwargs)
# ==================================================================================================
def plot(x, y, units='absolute', axis=None, **kwargs):
    r'''
    Plot.
    :arguments:
      **x, y** (``list``)
        Coordinates.
    :options:
      **units** ([``'absolute'``] | ``'relative'``)
        The type of units in which the coordinates are specified. Relative coordinates correspond to a
        fraction of the relevant axis. If you use relative coordinates, be sure to set the limits and
        scale before calling this function!
      ...
        Any ``plt.plot(...)`` option.
    :returns:
      The handle of the ``plt.plot(...)`` command.
    '''
    # default to the current axis
    axis = plt.gca() if axis is None else axis
    # map relative (axis-fraction) coordinates onto data coordinates
    if units.lower() == 'relative':
        x, y = rel2abs_x(x, axis), rel2abs_y(y, axis)
    return axis.plot(x, y, **kwargs)
# ==================================================================================================
def text(x, y, text, units='absolute', axis=None, **kwargs):
    r'''
    Plot a text.
    :arguments:
      **x, y** (``float``)
        Coordinates.
      **text** (``str``)
        Text to plot.
    :options:
      **units** ([``'absolute'``] | ``'relative'``)
        The type of units in which the coordinates are specified. Relative coordinates correspond to a
        fraction of the relevant axis. If you use relative coordinates, be sure to set the limits and
        scale before calling this function!
      ...
        Any ``plt.text(...)`` option.
    :returns:
      The handle of the ``plt.text(...)`` command.
    '''
    # default to the current axis
    axis = plt.gca() if axis is None else axis
    # map relative (axis-fraction) coordinates onto data coordinates
    if units.lower() == 'relative':
        x, y = rel2abs_x(x, axis), rel2abs_y(y, axis)
    return axis.text(x, y, text, **kwargs)
# ==================================================================================================
def diagonal_powerlaw(exp, ll=None, lr=None, tl=None, tr=None, width=None, height=None, plot=False, **kwargs):
    r'''
    Set the limits such that a power-law with a certain exponent lies on the diagonal.
    :arguments:
      **exp** (``<float>``)
        The power-law exponent.
      **ll, lr, tl, tr** (``<list>``)
        Coordinates of the lower-left, or the lower-right, or the top-left, or the top-right corner.
      **width, height** (``<float>``)
        Width or the height.
    :options:
      **axis** ([``plt.gca()``] | ...)
        Specify the axis to which to apply the limits.
      **plot** ([``False``] | ``True``)
        Plot the diagonal.
      ...
        Any ``plt.plot(...)`` option.
    :returns:
      The handle of the ``plt.plot(...)`` command (if any); ``None`` when ``plot=False``.
    '''
    axis = kwargs.pop('axis', plt.gca())
    # All geometry below is done in log-space, where a power-law is a straight line.
    # Exactly one of "width"/"height" may be given (truthiness test: note that a
    # value of 0 counts as "not given" here).
    if width and not height: width = np.log(width )
    elif height and not width : height = np.log(height)
    else: raise IOError('Specify "width" or "height"')
    # Exactly one corner may be given; convert it to log-space as well.
    if ll and not lr and not tl and not tr: ll = [ np.log(ll[0]), np.log(ll[1]) ]
    elif lr and not ll and not tl and not tr: lr = [ np.log(lr[0]), np.log(lr[1]) ]
    elif tl and not lr and not ll and not tr: tl = [ np.log(tl[0]), np.log(tl[1]) ]
    elif tr and not lr and not tl and not ll: tr = [ np.log(tr[0]), np.log(tr[1]) ]
    else: raise IOError('Specify "ll" or "lr" or "tl" or "tr"')
    # a diagonal power-law is only straight on a log-log plot
    axis.set_xscale('log')
    axis.set_yscale('log')
    # derive the missing extent from the exponent (|exp| is the slope in log-space)
    if width : height = width * np.abs(exp)
    elif height: width = height / np.abs(exp)
    # set the limits, measuring width/height away from the given corner;
    # np.exp undoes the np.log conversion above
    if ll:
        axis.set_xlim(sorted([np.exp(ll[0]), np.exp(ll[0]+width )]))
        axis.set_ylim(sorted([np.exp(ll[1]), np.exp(ll[1]+height)]))
    elif lr:
        axis.set_xlim(sorted([np.exp(lr[0]), np.exp(lr[0]-width )]))
        axis.set_ylim(sorted([np.exp(lr[1]), np.exp(lr[1]+height)]))
    elif tl:
        axis.set_xlim(sorted([np.exp(tl[0]), np.exp(tl[0]+width )]))
        axis.set_ylim(sorted([np.exp(tl[1]), np.exp(tl[1]-height)]))
    elif tr:
        axis.set_xlim(sorted([np.exp(tr[0]), np.exp(tr[0]-width )]))
        axis.set_ylim(sorted([np.exp(tr[1]), np.exp(tr[1]-height)]))
    # optionally draw the diagonal itself, in relative (axis-fraction) coordinates
    if plot:
        if exp > 0: return plot_powerlaw(exp, 0., 0., 1., **kwargs)
        else      : return plot_powerlaw(exp, 0., 1., 1., **kwargs)
# ==================================================================================================
def annotate_powerlaw(text, exp, startx, starty, width=None, rx=0.5, ry=0.5, **kwargs):
    r'''
    Added a label to the middle of a power-law annotation (see ``goosempl.plot_powerlaw``).
    :arguments:
      **exp** (``float``)
        The power-law exponent.
      **startx, starty** (``float``)
        Start coordinates.
    :options:
      **width, height, endx, endy** (``float``)
        Definition of the end coordinate (only on of these options is needed).
      **rx, ry** (``float``)
        Shift in x- and y-direction w.r.t. the default coordinates.
      **units** ([``'relative'``] | ``'absolute'``)
        The type of units in which the coordinates are specified. Relative coordinates correspond to a
        fraction of the relevant axis. If you use relative coordinates, be sure to set the limits and
        scale before calling this function!
      **axis** ([``plt.gca()``] | ...)
        Specify the axis to which to apply the limits.
      ...
        Any ``plt.text(...)`` option.
    :returns:
      The handle of the ``plt.text(...)`` command.
    '''
    # get options/defaults
    endx = kwargs.pop('endx' , None )
    endy = kwargs.pop('endy' , None )
    height = kwargs.pop('height', None )
    units = kwargs.pop('units' , 'relative')
    axis = kwargs.pop('axis' , plt.gca() )
    # check
    if axis.get_xscale() != 'log' or axis.get_yscale() != 'log':
        raise IOError('This function only works on a log-log scale, where the power-law is a straight line')
    # apply width/height: "width" (positional) takes precedence over "height";
    # whichever is used fixes one end coordinate, the other is derived below
    if width is not None:
        endx = startx + width
        endy = None
    elif height is not None:
        # the sign of the exponent decides whether the line rises or falls
        if exp > 0: endy = starty + height
        elif exp == 0: endy = starty
        else : endy = starty - height
        endx = None
    # transform relative (axis-fraction) coordinates to data coordinates;
    # None entries pass through rel2abs_* unchanged
    if units.lower() == 'relative':
        [startx, endx] = rel2abs_x([startx, endx], axis)
        [starty, endy] = rel2abs_y([starty, endy], axis)
    # determine multiplication constant: y = const * x**exp through the start point
    const = starty / ( startx**exp )
    # get end x/y-coordinate (whichever is still None)
    # NOTE(review): the "endy" branch divides by "exp" -- presumably exp != 0 is
    # required whenever only an end y-coordinate is known; confirm with callers
    if endx is not None: endy = const * endx**exp
    else : endx = ( endy / const )**( 1/exp )
    # middle of the annotation, interpolated in log10-space and shifted by rx/ry
    x = 10. ** ( np.log10(startx) + rx * ( np.log10(endx) - np.log10(startx) ) )
    y = 10. ** ( np.log10(starty) + ry * ( np.log10(endy) - np.log10(starty) ) )
    # plot
    return axis.text(x, y, text, **kwargs)
# ==================================================================================================
def plot_powerlaw(exp, startx, starty, width=None, **kwargs):
    r'''
    Plot a power-law.
    :arguments:
      **exp** (``float``)
        The power-law exponent.
      **startx, starty** (``float``)
        Start coordinates.
    :options:
      **width, height, endx, endy** (``float``)
        Definition of the end coordinate (only on of these options is needed).
      **units** ([``'relative'``] | ``'absolute'``)
        The type of units in which the coordinates are specified. Relative coordinates correspond to a
        fraction of the relevant axis. If you use relative coordinates, be sure to set the limits and
        scale before calling this function!
      **axis** ([``plt.gca()``] | ...)
        Specify the axis to which to apply the limits.
      ...
        Any ``plt.plot(...)`` option.
    :returns:
      The handle of the ``plt.plot(...)`` command.
    '''
    # get options/defaults
    endx = kwargs.pop('endx' , None )
    endy = kwargs.pop('endy' , None )
    height = kwargs.pop('height', None )
    units = kwargs.pop('units' , 'relative')
    axis = kwargs.pop('axis' , plt.gca() )
    # check
    if axis.get_xscale() != 'log' or axis.get_yscale() != 'log':
        raise IOError('This function only works on a log-log scale, where the power-law is a straight line')
    # apply width/height: "width" (positional) takes precedence over "height";
    # whichever is used fixes one end coordinate, the other is derived below
    if width is not None:
        endx = startx + width
        endy = None
    elif height is not None:
        # the sign of the exponent decides whether the line rises or falls
        if exp > 0: endy = starty + height
        elif exp == 0: endy = starty
        else : endy = starty - height
        endx = None
    # transform relative (axis-fraction) coordinates to data coordinates;
    # None entries pass through rel2abs_* unchanged
    if units.lower() == 'relative':
        [startx, endx] = rel2abs_x([startx, endx], axis)
        [starty, endy] = rel2abs_y([starty, endy], axis)
    # determine multiplication constant: y = const * x**exp through the start point
    const = starty / ( startx**exp )
    # get end x/y-coordinate (whichever is still None)
    # NOTE(review): the "endy" branch divides by "exp" -- presumably exp != 0 is
    # required whenever only an end y-coordinate is known; confirm with callers
    if endx is not None: endy = const * endx**exp
    else : endx = ( endy / const )**( 1/exp )
    # plot the straight segment between start and end (straight on log-log axes)
    return axis.plot([startx, endx], [starty, endy], **kwargs)
# ==================================================================================================
def grid_powerlaw(exp, insert=0, skip=0, end=-1, step=0, axis=None, **kwargs):
    r'''
    Draw a power-law grid: a grid that respects a certain power-law exponent. The grid-lines start from
    the positions of the ticks.
    :arguments:
      **exp** (``float``)
        The power-law exponent.
    :options:
      **insert** (``<int>``)
        Insert extra lines in between the default lines set by the tick positions.
      **skip, end, step** (``<int>``)
        Select from the lines based on ``coor = coor[skip:end:step]``.
      **axis** ([``plt.gca()``] | ...)
        Specify the axis to which to apply the limits.
      ...
        Any ``plt.plot(...)`` option.
    :returns:
      The handle of the ``plt.plot(...)`` command.
    '''
    # default axis
    if axis is None: axis = plt.gca()
    # default plot settings: thin dashed black lines
    kwargs.setdefault('color' , 'k' )
    kwargs.setdefault('linestyle', '--')
    kwargs.setdefault('linewidth', 1 )
    # check
    if axis.get_xscale() != 'log' or axis.get_yscale() != 'log':
        raise IOError('This function only works on a log-log scale, where the power-law is a straight line')
    # all coordinates below are built in *relative* axis coordinates (0..1) and
    # converted to data coordinates at the very end
    # zero-exponent: draw horizontal lines
    if exp == 0:
        # y-coordinate of the start positions (one line per y-tick)
        starty = abs2rel_y(axis.get_yticks(), axis=axis)
        # insert extra coordinates: linear interpolation adds "insert" lines
        # between each pair of tick-based lines
        if insert > 0:
            n = len(starty)
            x = np.linspace(0,1,n+(n-1)*int(insert))
            xp = np.linspace(0,1,n)
            starty = np.interp(x, xp, starty)
        # skip coordinates
        # NOTE(review): the default end=-1 drops the last line here -- confirm intended
        starty = starty[int(skip):int(end):int(1+step)]
        # set remaining coordinates: horizontal lines span the full x-range
        endy = starty
        startx = np.zeros((len(starty)))
        endx = np.ones ((len(starty)))
    # all other exponents
    else:
        # get the axis' size in real coordinates
        # - get the limits
        xmin, xmax = axis.get_xlim()
        ymin, ymax = axis.get_ylim()
        # - compute the size in both directions (in decades)
        deltax = np.log10(xmax) - np.log10(xmin)
        deltay = np.log10(ymax) - np.log10(ymin)
        # convert the exponent in real coordinates to an exponent in relative coordinates
        b = np.abs(exp) * deltax / deltay
        # x-coordinate of the start positions (one line per x-tick)
        startx = abs2rel_x(axis.get_xticks(), axis=axis)
        # compute how many labels need to be prepended so that lines entering from
        # the left edge still cross the full axis
        Dx = startx[1] - startx[0]
        nneg = int(np.floor(1./(b*Dx))) - 1
        # add extra to be sure
        if insert > 0:
            nneg += 1
        # prepend start positions left of the axis (negative relative coordinates)
        if nneg > 0:
            startx = np.hstack(( startx[0]+np.cumsum(-Dx * np.ones((nneg)))[::-1], startx ))
        # insert extra coordinates between the tick-based lines
        if insert > 0:
            n = len(startx)
            x = np.linspace(0,1,n+(n-1)*int(insert))
            xp = np.linspace(0,1,n)
            startx = np.interp(x, xp, startx)
        # skip coordinates
        if step > 0:
            startx = startx[int(skip)::int(1+step)]
        # x-coordinate of the end of the lines: each line spans the full y-range,
        # so its x-extent is 1/b in relative coordinates
        endx = startx + 1/b
        # y-coordinate of the start and the end of the lines
        # (positive exponent: bottom-to-top; negative: top-to-bottom)
        if exp > 0:
            starty = np.zeros((len(startx)))
            endy = np.ones ((len(startx)))
        else:
            starty = np.ones ((len(startx)))
            endy = np.zeros((len(startx)))
    # convert to real coordinates
    startx = rel2abs_x(startx, axis)
    endx = rel2abs_x(endx , axis)
    starty = rel2abs_y(starty, axis)
    endy = rel2abs_y(endy , axis)
    # plot: one plot call; each column of the 2xN arrays is one grid line
    lines = axis.plot(np.vstack(( startx, endx )), np.vstack(( starty, endy )), **kwargs)
    # remove access in labels: keep only the first line in the legend
    plt.setp(lines[1:], label="_")
    # return handles
    return lines
# ==================================================================================================
def histogram_bin_edges_minwidth(min_width, bins):
    r'''
    Merge bins with right-neighbour until each bin has a minimum width.
    :arguments:
      **bins** (``<array_like>``)
        The bin-edges.
      **min_width** (``<float>``)
        The minimum bin width.
    '''
    # nothing to enforce
    if min_width is None or min_width is False:
        return bins
    # repeatedly merge the left-most too-narrow bin with its right neighbour
    while True:
        narrow = np.where(np.diff(bins) < min_width)[0]
        if not len(narrow):
            return bins
        i = narrow[0]
        if i + 1 == len(bins) - 1:
            # narrow bin is the last one: merge it into its left neighbour instead
            bins = np.hstack((bins[:(i)], bins[-1]))
        else:
            bins = np.hstack((bins[:(i + 1)], bins[(i + 2):]))
# ==================================================================================================
def histogram_bin_edges_mincount(data, min_count, bins):
    r'''
    Merge bins with right-neighbour until each bin has a minimum number of data-points.
    :arguments:
      **data** (``<array_like>``)
        Input data. The histogram is computed over the flattened array.
      **bins** (``<array_like>`` | ``<int>``)
        The bin-edges (or the number of bins, automatically converted to equal-sized bins).
      **min_count** (``<int>``)
        The minimum number of data-points per bin.
    '''
    # nothing to enforce
    if min_count is None or min_count is False:
        return bins
    # strict type-check (deliberately excludes e.g. bool/float)
    if type(min_count) != int:
        raise IOError('"min_count" must be an integer number')
    # repeatedly merge the left-most too-sparse bin with its right neighbour
    while True:
        counts, _ = np.histogram(data, bins=bins, density=False)
        sparse = np.where(counts < min_count)[0]
        if not len(sparse):
            return bins
        i = sparse[0]
        if i + 1 == len(counts):
            # sparse bin is the last one: merge it into its left neighbour instead
            bins = np.hstack((bins[:(i)], bins[-1]))
        else:
            bins = np.hstack((bins[:(i + 1)], bins[(i + 2):]))
# ==================================================================================================
def histogram_bin_edges(data, bins=10, mode='equal', min_count=None, integer=False, remove_empty_edges=True, min_width=None):
r'''
Determine bin-edges.
:arguments:
**data** (``<array_like>``)
Input data. The histogram is computed over the flattened array.
:options:
**bins** ([``10``] | ``<int>``)
The number of bins.
**mode** ([``'equal'`` | ``<str>``)
Mode with which to compute the bin-edges:
* ``'equal'``: each bin has equal width.
* ``'log'``: logarithmic spacing.
* ``'uniform'``: uniform number of data-points per bin.
**min_count** (``<int>``)
The minimum number of data-points per bin.
**min_width** (``<float>``)
The minimum width of each bin.
**integer** ([``False``] | [``True``])
If ``True``, bins not encompassing an integer are removed
(e.g. a bin with edges ``[1.1, 1.9]`` is removed, but ``[0.9, 1.1]`` is not removed).
**remove_empty_edges** ([``True``] | [``False``])
Remove empty bins at the beginning or the end.
:returns:
**bin_edges** (``<array of dtype float>``)
The edges to pass into histogram.
'''
# determine the bin-edges
if mode == 'equal':
bin_edges = np.linspace(np.min(data),np.max(data),bins+1)
elif mode == 'log':
bin_edges = np.logspace(np.log10(np.min(data)),np.log10(np.max(data)),bins+1)
elif mode == 'uniform':
# - check
if hasattr(bins, "__len__"):
raise IOError('Only the number of bins can be specified')
# - use the minimum count to estimate the number of bins
if min_count is not None and min_count is not False:
if type(min_count) != int: raise IOError('"min_count" must be an integer number')
bins = int(np.floor(float(len(data))/float(min_count)))
# - number of data-points in each bin (equal for each)
count = int(np.floor(float(len(data))/float(bins))) * np.ones(bins, dtype='int')
# - increase the number of data-points by one is an many bins as needed,
# such that the total fits the total number of data-points
count[np.linspace(0, bins-1, len(data)-np.sum(count)).astype(np.int)] += 1
# - split the data
idx = np.empty((bins+1), dtype='int')
idx[0 ] = 0
idx[1:] = np.cumsum(count)
idx[-1] = len(data) - 1
# - determine the bin-edges
bin_edges = np.unique(np.sort(data)[idx])
else:
raise IOError('Unknown option')
# remove empty starting and ending bin (related to an unfortunate choice of bin-edges)
if remove_empty_edges:
N, _ = np.histogram(data, bins=bin_edges, density=False)
idx = np.min(np.where(N>0)[0])
jdx = np.max(np.where(N>0)[0])
bin_edges = bin_edges[(idx):(jdx+2)]
# merge bins with too few data-points (if needed)
bin_edges = histogram_bin_edges_mincount(data, min_count=min_count, bins=bin_edges)
# merge bins that have too small of a width
bin_edges = histogram_bin_edges_minwidth(min_width=min_width, bins=bin_edges)
# select only bins that encompass an integer (and retain the original bounds)
if integer:
idx = np.where(np.diff(np.floor(bin_edges))>=1)[0]
idx = np.unique(np.hstack((0, idx, len(bin_edges)-1)))
bin_edges = bin_edges[idx]
# return
return bin_edges
# ==================================================================================================
def histogram(data, return_edges=True, **kwargs):
    r'''
    Compute histogram.
    See `numpy.histrogram <https://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html>`_
    :extra options:
      **return_edges** ([``True``] | [``False``])
        Return the bin edges if set to ``True``, return their midpoints otherwise.
    '''
    # delegate the actual binning to NumPy
    counts, bin_edges = np.histogram(data, **kwargs)
    if return_edges:
        return counts, bin_edges
    # replace edges by the mid-point of each bin
    midpoints = np.diff(bin_edges) / 2. + bin_edges[:-1]
    return counts, midpoints
# ==================================================================================================
def histogram_cumulative(data, **kwargs):
    r'''
    Compute cumulative histogram.
    See `numpy.histrogram <https://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html>`_
    :extra options:
      **return_edges** ([``True``] | [``False``])
        Return the bin edges if set to ``True``, return their midpoints otherwise.
      **normalize** ([``False``] | ``True``)
        Normalize such that the final probability is one. In this case the function returns the (binned)
        cumulative probability density.
    '''
    # extract local options before forwarding to NumPy
    return_edges = kwargs.pop('return_edges', True)
    normalize = kwargs.pop('normalize', False)
    counts, edges = np.histogram(data, **kwargs)
    # running total over the bins
    cumulative = np.cumsum(counts)
    if normalize:
        cumulative = cumulative / cumulative[-1]
    if return_edges:
        return cumulative, edges
    # replace edges by the mid-point of each bin
    return cumulative, np.diff(edges) / 2. + edges[:-1]
# ==================================================================================================
def hist(P, edges, **kwargs):
    r'''
    Plot histogram.
    '''
    from matplotlib.collections import PatchCollection
    from matplotlib.patches import Polygon
    # extract local options
    axis = kwargs.pop('axis', plt.gca())
    cindex = kwargs.pop('cindex', None)
    autoscale = kwargs.pop('autoscale', True)
    # set defaults
    kwargs.setdefault('edgecolor', 'k')
    # no color-index -> set transparent face
    if cindex is None:
        kwargs.setdefault('facecolor', (0., 0., 0., 0.))
    # one rectangular patch per bin, from the baseline up to the count
    patches = [
        Polygon(np.array([[xl, 0.], [xu, 0.], [xu, p], [xl, p]]))
        for p, xl, xu in zip(P, edges[:-1], edges[1:])
    ]
    # convert patches -> matplotlib-objects
    collection = PatchCollection(patches, **kwargs)
    # add colors to patches
    if cindex is not None:
        collection.set_array(cindex)
    # add patches to axis
    axis.add_collection(collection)
    # rescale the axes manually: +/- 10% extra margin around the data
    if autoscale:
        xlo, xhi = edges[0], edges[-1]
        ylo, yhi = 0, np.max(P)
        dx = .1 * (xhi - xlo)
        dy = .1 * (yhi - ylo)
        axis.set_xlim([xlo - dx, xhi + dx])
        axis.set_ylim([ylo - dy, yhi + dy])
    return collection
# ==================================================================================================
def cdf(data, mode='continuous', **kwargs):
    '''
    Return cumulative density.
    :arguments:
      **data** (``<numpy.ndarray>``)
        Input data, to plot the distribution for.
    :returns:
      **P** (``<numpy.ndarray>``)
        Cumulative probability.
      **x** (``<numpy.ndarray>``)
        Data points.
    '''
    # NOTE: "mode" and extra keyword arguments are accepted for interface
    # compatibility but currently unused
    sorted_data = np.sort(data)
    probability = np.linspace(0.0, 1.0, len(sorted_data))
    return (probability, sorted_data)
# ==================================================================================================
# ==================================================================================================
|
tgbugs/ontquery | ontquery/plugins/interlex_client.py | examples | python | def examples():
''' Examples of how to use. Default are that some functions are commented out in order
to not cause harm to existing metadata within the database.
'''
sci = InterLexClient(
api_key = os.environ.get('INTERLEX_API_KEY'),
base_url = 'https://beta.scicrunch.org/api/1/', # NEVER CHANGE
)
entity = {
'label': 'brain115',
'type': 'fde', # broken at the moment NEEDS PDE HARDCODED
'definition': 'Part of the central nervous system',
'comment': 'Cannot live without it',
'superclass': {
'ilx_id': 'ilx_0108124', # ILX ID for Organ
},
'synonyms': [
{
'literal': 'Encephalon'
},
{
'literal': 'Cerebro'
},
],
'existing_ids': [
{
'iri': 'http://uri.neuinfo.org/nif/nifstd/birnlex_796',
'curie': 'BIRNLEX:796',
},
],
}
simple_entity = {
'label': entity['label'],
'type': entity['type'], # broken at the moment NEEDS PDE HARDCODED
'definition': entity['definition'],
'comment': entity['comment'],
'superclass': entity['superclass']['ilx_id'],
'synonyms': [syn['literal'] for syn in entity['synonyms']],
'predicates': {'tmp_0381624': 'http://example_dbxref'}
}
annotation = {
'term_ilx_id': 'ilx_0101431', # brain ILX ID
'annotation_type_ilx_id': 'tmp_0381624', # hasDbXref ILX ID
'annotation_value': 'PMID:12345',
}
relationship = {
'entity1_ilx': 'ilx_0101431', # brain
'relationship_ilx': 'ilx_0115023', # Related to
'entity2_ilx': 'ilx_0108124', #organ
}
update_entity_data = {
'ilx_id': 'ilx_0101431',
'label': 'Brain',
'definition': 'update_test!!',
'type': 'fde',
'comment': 'test comment',
'superclass': 'ilx_0108124',
'synonyms': ['test', 'test2', 'test2'],
}
# resp = sci.delete_annotation(**{
# 'term_ilx_id': 'ilx_0101431', # brain ILX ID
# 'annotation_type_ilx_id': 'ilx_0115071', # hasConstraint ILX ID
# 'annotation_value': 'test_12345',
# })
relationship = {
'entity1_ilx': 'http://uri.interlex.org/base/ilx_0100001', # (R)N6 chemical ILX ID
'relationship_ilx': 'http://uri.interlex.org/base/ilx_0112772', # Afferent projection ILX ID
'entity2_ilx': 'http://uri.interlex.org/base/ilx_0100000', #1,2-Dibromo chemical ILX ID
} | Examples of how to use. Default are that some functions are commented out in order
to not cause harm to existing metadata within the database. | train | https://github.com/tgbugs/ontquery/blob/bcf4863cb2bf221afe2b093c5dc7da1377300041/ontquery/plugins/interlex_client.py#L778-L846 | null | import json
import os
import requests
from sys import exit
from typing import List
class InterLexClient:
""" Connects to SciCrunch via its' api endpoints
Purpose is to allow external curators to add entities and annotations to those entities.
Functions To Use:
add_entity
add_annotation
Notes On Time Complexity:
Function add_entity, if added an entity successfully, will hit a least 5 endpoints. This
may cause it to take a few seconds to load each entity into SciCrunch. Function
add_annotation is a little more forgiving with it only hitting 3 minimum.
"""
    # Root of the client's exception hierarchy; catch this to handle any client error.
    class Error(Exception): pass
    class SuperClassDoesNotExistError(Error):
        """ The superclass listed does not exist! """
    class EntityDoesNotExistError(Error):
        """ The entity listed does not exist! """
    # Raised when the server returns non-JSON or an unexpected status code.
    class BadResponseError(Error): pass
    # Raised when an entity is submitted without a label.
    class NoLabelError(Error): pass
    # Raised when an entity is submitted without a type.
    class NoTypeError(Error): pass
    # Raised when a required key is missing from an entity dict.
    class MissingKeyError(Error): pass
    # Raised when an entity dict contains an unrecognized key.
    class IncorrectKeyError(Error): pass
    # Raised when the server rejects the provided API key.
    class IncorrectAPIKeyError(Error): pass
    # Production API root; the key check in __init__ always uses this URL.
    default_base_url = 'https://scicrunch.org/api/1/'
    # Prefix used to build full InterLex IRIs from ilx_/tmp_ fragments.
    ilx_base_url = 'http://uri.interlex.org/base/'
def __init__(self, api_key: str, base_url: str = default_base_url):
self.api_key = api_key
self.base_url = base_url
user_info_url = self.default_base_url + 'user/info?key=' + self.api_key
self.check_api_key(user_info_url)
self.user_id = str(self.get(user_info_url)['id'])
def check_api_key(self, url):
response = requests.get(
url,
headers = {'Content-type': 'application/json'},
auth = ('scicrunch', 'perl22(query)') # for test2.scicrunch.org
)
if response.status_code not in [200, 201]: # Safety catch.
raise self.IncorrectAPIKeyError('api_key given is incorrect.')
def process_response(self, response: requests.models.Response) -> dict:
""" Checks for correct data response and status codes """
try:
output = response.json()
except json.JSONDecodeError: # Server is having a bad day and crashed.
raise self.BadResponseError(
'Json not returned with status code [' + str(response.status_code) + ']')
if response.status_code == 400:
return output
if response.status_code not in [200, 201]: # Safety catch.
raise self.BadResponseError(
str(output) + ': with status code [' + str(response.status_code) +
'] and params:' + str(output))
return output['data']
def get(self, url: str) -> List[dict]:
""" Requests data from database """
response = requests.get(
url,
headers = {'Content-type': 'application/json'},
auth = ('scicrunch', 'perl22(query)') # for test2.scicrunch.org
)
output = self.process_response(response)
return output
def post(self, url: str, data: List[dict]) -> List[dict]:
""" Gives data to database """
data.update({
'key': self.api_key,
})
response = requests.post(
url,
data = json.dumps(data),
headers = {'Content-type': 'application/json'},
auth = ('scicrunch', 'perl22(query)') # for test2.scicrunch.org
)
output = self.process_response(response)
return output
def fix_ilx(self, ilx_id: str) -> str:
""" Database only excepts lower case and underscore version of ID """
# FIXME probably .rsplit('/', 1) is the more correct version of this
# and because this is nominally a 'private' interface these should be
ilx_id = ilx_id.replace('http://uri.interlex.org/base/', '')
if ilx_id[:4] not in ['TMP:', 'tmp_', 'ILX:', 'ilx_']:
raise ValueError(
'Need to provide ilx ID with format ilx_# or ILX:# for given ID ' + ilx_id)
return ilx_id.replace('ILX:', 'ilx_').replace('TMP:', 'tmp_')
def process_superclass(self, entity: List[dict]) -> List[dict]:
""" Replaces ILX ID with superclass ID """
superclass = entity.pop('superclass')
label = entity['label']
if not superclass.get('ilx_id'):
raise self.SuperClassDoesNotExistError(
f'Superclass not given an interlex ID for label: {label}')
superclass_data = self.get_entity(superclass['ilx_id'])
if not superclass_data['id']:
raise self.SuperClassDoesNotExistError(
'Superclass ILX ID: ' + superclass['ilx_id'] + ' does not exist in SciCrunch')
# BUG: only excepts superclass_tid
entity['superclasses'] = [{'superclass_tid': superclass_data['id']}]
return entity
def process_synonyms(self, entity: List[dict]) -> List[dict]:
""" Making sure key/value is in proper format for synonyms in entity """
label = entity['label']
for synonym in entity['synonyms']:
# these are internal errors and users should never see them
if 'literal' not in synonym:
raise ValueError(f'Synonym not given a literal for label: {label}')
elif len(synonym) > 1:
raise ValueError(f'Too many keys in synonym for label: {label}')
return entity
def process_existing_ids(self, entity: List[dict]) -> List[dict]:
""" Making sure key/value is in proper format for existing_ids in entity """
label = entity['label']
existing_ids = entity['existing_ids']
for existing_id in existing_ids:
if 'curie' not in existing_id or 'iri' not in existing_id:
raise ValueError(
f'Missing needing key(s) in existing_ids for label: {label}')
elif len(existing_id) > 2:
raise ValueError(
f'Extra keys not recognized in existing_ids for label: {label}')
return entity
def crude_search_scicrunch_via_label(self, label:str) -> dict:
""" Server returns anything that is simlar in any catagory """
url = self.base_url + 'term/search/{term}?key={api_key}'.format(
term = label,
api_key = self.api_key,
)
return self.get(url)
def check_scicrunch_for_label(self, label: str) -> dict:
""" Sees if label with your user ID already exists
There are can be multiples of the same label in interlex, but there should only be one
label with your user id. Therefore you can create labels if there already techniqually
exist, but not if you are the one to create it.
"""
list_of_crude_matches = self.crude_search_scicrunch_via_label(label)
for crude_match in list_of_crude_matches:
# If labels match
if crude_match['label'].lower().strip() == label.lower().strip():
complete_data_of_crude_match = self.get_entity(crude_match['ilx'])
crude_match_label = crude_match['label']
crude_match_user_id = complete_data_of_crude_match['uid']
# If label was created by you
if str(self.user_id) == str(crude_match_user_id):
return complete_data_of_crude_match # You created the entity already
# No label AND user id match
return {}
def get_entity(self, ilx_id: str) -> dict:
""" Gets full meta data (expect their annotations and relationships) from is ILX ID """
ilx_id = self.fix_ilx(ilx_id)
url = self.base_url + "ilx/search/identifier/{identifier}?key={api_key}".format(
identifier = ilx_id,
api_key = self.api_key,
)
return self.get(url)
    def add_entity(
        self,
        label: str,
        type: str,
        definition: str = None,
        comment: str = None,
        superclass: str = None,
        synonyms: list = None) -> dict:
        """ Add an entity to InterLex and return a flattened record of what was created.

        Wraps :meth:`add_raw_entity`: builds the nested dict that endpoint expects
        from flat arguments, then flattens the server response back into the same
        shape as the input arguments.

        Args:
            label: name of the entity (required).
            type: entity type; one of term, cde, fde, pde, annotation, relationship.
                NOTE: shadows the builtin ``type``; kept for interface compatibility.
            definition: entity definition.
            comment: free-form note about the entity or its data.
            superclass: ILX ID (any accepted format) of the parent entity.
            synonyms: list of synonym strings.

        Returns:
            dict with keys ``iri``, ``curie``, ``ilx`` plus whichever of the input
            fields were given (values stringified from the server response).

        Raises:
            NoLabelError: if ``label`` is falsy.
            NoTypeError: if ``type`` is falsy.
        """
        # snapshot the caller's (truthy) arguments; must stay the first statement
        # so that locals() contains exactly the parameters
        template_entity_input = {k:v for k, v in locals().items() if k != 'self' and v}
        if template_entity_input.get('superclass'):
            template_entity_input['superclass'] = self.fix_ilx(template_entity_input['superclass'])
        if not label:
            raise self.NoLabelError('Entity needs a label')
        if not type:
            raise self.NoTypeError('Entity needs a type')
        # build the nested structure expected by add_raw_entity
        entity_input = {
            'label': label,
            'type': type,
        }
        if definition:
            entity_input['definition'] = definition
        if comment:
            entity_input['comment'] = comment
        if superclass:
            entity_input['superclass'] = {'ilx_id':self.fix_ilx(superclass)}
        if synonyms:
            entity_input['synonyms'] = [{'literal': syn} for syn in synonyms]
        raw_entity_outout = self.add_raw_entity(entity_input)
        # Sanity check -> output same as input, but filled with response data
        entity_output = {}
        # pick the iri/curie pair that carries the interlex 'ilx_' fragment
        ics = [(e['iri'], e['curie'])
               for e in raw_entity_outout['existing_ids']]
        entity_output['iri'], entity_output['curie'] = sorted((i, c)
                                                              for i, c in ics
                                                              if 'ilx_' in i)[0]
        ### FOR NEW BETA. Old can have 'ilx_' in the ids ###
        if 'tmp' in raw_entity_outout['ilx']:
            _id = raw_entity_outout['ilx'].split('_')[-1]
            entity_output['iri'] = 'http://uri.interlex.org/base/tmp_' + _id
            entity_output['curie'] = 'TMP:' + _id
        # mirror each input field back from the server response
        for key, value in template_entity_input.items():
            if key == 'superclass':
                entity_output[key] = raw_entity_outout['superclasses'][0]['ilx']
            elif key == 'synonyms':
                entity_output[key] = [syn['literal']
                                      for syn in raw_entity_outout['synonyms']]
            else:
                entity_output[key] = str(raw_entity_outout[key])
        # skip this for now, I check it downstream be cause I'm paranoid, but in this client it is
        # safe to assume that the value given will be the value returned if there is a return at all
        # it also isn't that they match exactly, because some new values (e.g. iri and curie) are expected
        #if entity_output != template_entity_input:
            # DEBUG: helps see what's wrong; might want to make a clean version of this
            # for key, value in template_entity_input.items():
                # if template_entity_input[key] != entity_output[key]:
                    # print(template_entity_input[key], entity_output[key])
            #raise self.BadResponseError('The server did not return proper data!')
        # expand fragments to full IRIs for the caller
        if entity_output.get('superclass'):
            entity_output['superclass'] = self.ilx_base_url + entity_output['superclass']
        entity_output['ilx'] = self.ilx_base_url + raw_entity_outout['ilx']
        return entity_output
def add_raw_entity(self, entity: dict) -> dict:
""" Adds entity if it does not already exist under your user ID.
Need to provide a list of dictionaries that have at least the key/values
for label and type. If given a key, the values provided must be in the
format shown in order for the server to except them. You can input
multiple synonyms, or existing_ids.
Entity type can be any of the following: term, pde, fde, cde, annotation, or relationship
Options Template:
entity = {
'label': '',
'type': '',
'definition': '',
'comment': '',
'superclass': {
'ilx_id': ''
},
'synonyms': [
{
'literal': ''
},
],
'existing_ids': [
{
'iri': '',
'curie': '',
},
],
}
Minimum Needed:
entity = {
'label': '',
'type': '',
}
Example:
entity = {
'label': 'brain',
'type': 'pde',
'definition': 'Part of the central nervous system',
'comment': 'Cannot live without it',
'superclass': {
'ilx_id': 'ilx_0108124', # ILX ID for Organ
},
'synonyms': [
{
'literal': 'Encephalon'
},
{
'literal': 'Cerebro'
},
],
'existing_ids': [
{
'iri': 'http://uri.neuinfo.org/nif/nifstd/birnlex_796',
'curie': 'BIRNLEX:796',
},
],
}
"""
needed_in_entity = set([
'label',
'type',
])
options_in_entity = set([
'label',
'type',
'definition',
'comment',
'superclass',
'synonyms',
'existing_ids'
])
prime_entity_url = self.base_url + 'ilx/add'
add_entity_url = self.base_url + 'term/add'
### Checking if key/value format is correct ###
# Seeing if you are missing a needed key
if (set(entity) & needed_in_entity) != needed_in_entity:
raise self.MissingKeyError(
'You need key(s): '+ str(needed_in_entity - set(entity)))
# Seeing if you have other options not included in the description
elif (set(entity) | options_in_entity) != options_in_entity:
raise self.IncorrectKeyError(
'Unexpected key(s): ' + str(set(entity) - options_in_entity))
entity['type'] = entity['type'].lower() # BUG: server only takes lowercase
if entity['type'] not in ['term', 'relationship', 'annotation', 'cde', 'fde', 'pde']:
raise TypeError(
'Entity should be one of the following: ' +
'term, relationship, annotation, cde, fde, pde')
if entity.get('superclass'):
entity = self.process_superclass(entity)
if entity.get('synonyms'):
entity = self.process_synonyms(entity)
if entity.get('existing_ids'):
entity = self.process_existing_ids(entity)
entity['uid'] = self.user_id # BUG: php lacks uid update
### Adding entity to SciCrunch ###
entity['term'] = entity.pop('label') # ilx/add nuance
ilx_data = self.post(
url = prime_entity_url,
data = entity.copy(),
) # requesting spot in server for entity
if ilx_data.get('ilx'):
ilx_id = ilx_data['ilx']
else:
ilx_id = ilx_data['fragment'] # beta.scicrunch.org
entity['label'] = entity.pop('term') # term/add nuance
entity['ilx'] = ilx_id # need entity ilx_id to place entity in db
output = self.post(
url = add_entity_url,
data = entity.copy(),
) # data represented in SciCrunch interface
### Checking if label already exisits ###
if output.get('errormsg'):
if 'already exists' in output['errormsg'].lower():
prexisting_data = self.check_scicrunch_for_label(entity['label'])
if prexisting_data:
print(
'You already added entity', entity['label'],
'with ILX ID:', prexisting_data['ilx'])
return prexisting_data
self.Error(output) # FIXME what is the correct error here?
self.Error(output) # FIXME what is the correct error here?
# BUG: server output incomplete compared to search via ilx ids
output = self.get_entity(output['ilx'])
return output
def update_entity(
        self,
        ilx_id: str,
        label: str = None,
        type: str = None,
        definition: str = None,
        comment: str = None,
        superclass: str = None,
        synonyms: list = None) -> dict:
        """ Updates pre-existing entity as long as the api_key is from the account that created it

        Args:
            ilx_id: InterLex ID of the entity to update
            label: name of entity
            type: entities type
                Can be any of the following: term, cde, fde, pde, annotation, relationship
            definition: entities definition
            comment: a foot note regarding either the interpretation of the data or the data itself
            superclass: entity is a sub-part of this entity
                Example: Organ is a superclass to Brain
            synonyms: entity synonyms

        Returns:
            Server response that is a nested dictionary format
        """
        # Must stay the FIRST statement: snapshots the keyword arguments
        # (minus self) before any other locals exist, so the server response
        # can later be filtered down to just the fields the caller supplied.
        template_entity_input = {k:v for k, v in locals().copy().items() if k != 'self'}
        if template_entity_input.get('superclass'):
            template_entity_input['superclass'] = self.fix_ilx(template_entity_input['superclass'])
        existing_entity = self.get_entity(ilx_id=ilx_id)
        if not existing_entity['id']: # TODO: Need to make a proper ilx_id check error
            raise self.EntityDoesNotExistError(
                f'ilx_id provided {ilx_id} does not exist')
        update_url = self.base_url + 'term/edit/{id}'.format(id=existing_entity['id'])
        # Overlay only the fields the caller actually supplied.
        if label:
            existing_entity['label'] = label
        if type:
            existing_entity['type'] = type
        if definition:
            existing_entity['definition'] = definition
        if comment:
            existing_entity['comment'] = comment
        if superclass:
            existing_entity['superclass'] = {'ilx_id': superclass}
            existing_entity = self.process_superclass(existing_entity)
        # Synonyms: reuse the existing record on a case-insensitive match,
        # otherwise append the new literal.
        if synonyms:
            if existing_entity['synonyms']:
                new_existing_synonyms = []
                existing_synonyms = {syn['literal'].lower():syn for syn in existing_entity['synonyms']}
                for synonym in synonyms:
                    existing_synonym = existing_synonyms.get(synonym.lower())
                    if not existing_synonym:
                        new_existing_synonyms.append({'literal': synonym})
                    else:
                        new_existing_synonyms.append(existing_synonym)
                existing_entity['synonyms'] = new_existing_synonyms
        response = self.post(
            url = update_url,
            data = existing_entity,
        )
        # BUG: server response is bad and needs to actually search again to get proper format
        raw_entity_output = self.get_entity(response['ilx'])
        entity_output = {}
        ics = [(e['iri'], e['curie'])
               for e in raw_entity_output['existing_ids']]
        # Pick the canonical interlex iri/curie pair (sorted for determinism).
        entity_output['iri'], entity_output['curie'] = sorted((i, c)
                                                              for i, c in ics
                                                              if 'ilx_' in i)[0]
        ### FOR NEW BETA. Old can have 'ilx_' in the ids ###
        if 'tmp' in raw_entity_output['ilx']:
            _id = raw_entity_output['ilx'].split('_')[-1]
            entity_output['iri'] = 'http://uri.interlex.org/base/tmp_' + _id
            entity_output['curie'] = 'TMP:' + _id
        # Echo back only the fields the caller asked to change.
        for key, value in template_entity_input.items():
            if key == 'superclass':
                if raw_entity_output.get('superclasses'):
                    entity_output[key] = raw_entity_output['superclasses'][0]['ilx']
            elif key == 'synonyms':
                entity_output[key] = [syn['literal']
                                      for syn in raw_entity_output['synonyms']]
            elif key == 'ilx_id':
                pass
            else:
                entity_output[key] = str(raw_entity_output[key])
        if entity_output.get('superclass'):
            entity_output['superclass'] = self.ilx_base_url + entity_output['superclass']
        entity_output['ilx'] = self.ilx_base_url + raw_entity_output['ilx']
        return entity_output
def get_annotation_via_tid(self, tid: str) -> dict:
        """ Fetch every annotation anchored to the entity with database id *tid*. """
        endpoint = 'term/get-annotations/{tid}?key={api_key}'
        query = endpoint.format(tid=tid, api_key=self.api_key)
        return self.get(self.base_url + query)
def add_annotation(
        self,
        term_ilx_id: str,
        annotation_type_ilx_id: str,
        annotation_value: str) -> dict:
        """ Adding an annotation value to a pre-existing entity

        An annotation exists as 3 different parts:
            1. entity with type term, cde, fde, or pde (the anchor)
            2. entity with type annotation (the predicate)
            3. string value of the annotation

        Args:
            term_ilx_id: ILX ID of the entity being annotated
            annotation_type_ilx_id: ILX ID of the annotation-type entity
            annotation_value: free-text value of the annotation

        Returns:
            Server response for the new annotation, or the pre-existing
            annotation record when an identical one already exists.

        Example:
            annotation = {
                'term_ilx_id': 'ilx_0101431', # brain ILX ID
                'annotation_type_ilx_id': 'ilx_0381360', # hasDbXref ILX ID
                'annotation_value': 'http://neurolex.org/wiki/birnlex_796',
            }
        """
        url = self.base_url + 'term/add-annotation'
        # Both the anchor and the annotation-type entity must already exist.
        term_data = self.get_entity(term_ilx_id)
        if not term_data['id']:
            # NOTE(review): exit() aborts the whole interpreter; raising an
            # exception would be friendlier to library callers — confirm intent.
            exit(
                'term_ilx_id: ' + term_ilx_id + ' does not exist'
            )
        anno_data = self.get_entity(annotation_type_ilx_id)
        if not anno_data['id']:
            exit(
                'annotation_type_ilx_id: ' + annotation_type_ilx_id +
                ' does not exist'
            )
        data = {
            'tid': term_data['id'],
            'annotation_tid': anno_data['id'],
            'value': annotation_value,
            'term_version': term_data['version'],
            'annotation_term_version': anno_data['version'],
            'orig_uid': self.user_id, # BUG: php lacks orig_uid update
        }
        output = self.post(
            url = url,
            data = data,
        )
        ### If already exists, we return the actual annotation properly ###
        if output.get('errormsg'):
            if 'already exists' in output['errormsg'].lower():
                term_annotations = self.get_annotation_via_tid(term_data['id'])
                for term_annotation in term_annotations:
                    if str(term_annotation['annotation_tid']) == str(anno_data['id']):
                        if term_annotation['value'] == data['value']:
                            print(
                                'Annotation: [' + term_data['label'] + ' -> ' + anno_data['label'] +
                                ' -> ' + data['value'] + '], already exists.'
                            )
                            return term_annotation
                # Server said 'already exists' but no matching record found.
                exit(output)
            # Any other server-side error message is treated as fatal.
            exit(output)
        return output
def delete_annotation(
        self,
        term_ilx_id: str,
        annotation_type_ilx_id: str,
        annotation_value: str) -> dict:
        """ Deletes the annotation whose anchor, type, and value all match.

        There is no hard-delete endpoint; the matching annotation row is
        blanked out via the edit endpoint instead.

        Args:
            term_ilx_id: ILX ID of the annotated (anchor) entity
            annotation_type_ilx_id: ILX ID of the annotation-type entity
            annotation_value: exact value of the annotation to remove

        Returns:
            Server response dict, or None when no matching annotation exists.
        """
        # Resolve both endpoints of the annotation; bail out if either is missing.
        term_data = self.get_entity(term_ilx_id)
        if not term_data['id']:
            # NOTE(review): exit() aborts the interpreter; an exception would
            # be friendlier to library callers — confirm intent.
            exit(
                'term_ilx_id: ' + term_ilx_id + ' does not exist'
            )
        anno_data = self.get_entity(annotation_type_ilx_id)
        if not anno_data['id']:
            exit(
                'annotation_type_ilx_id: ' + annotation_type_ilx_id +
                ' does not exist'
            )
        # Find the concrete annotation row matching anchor, type, and value.
        entity_annotations = self.get_annotation_via_tid(term_data['id'])
        annotation_id = ''
        for annotation in entity_annotations:
            if str(annotation['tid']) == str(term_data['id']):
                if str(annotation['annotation_tid']) == str(anno_data['id']):
                    if str(annotation['value']) == str(annotation_value):
                        annotation_id = annotation['id']
                        break
        if not annotation_id:
            print('''WARNING: Annotation you wanted to delete does not exist ''')
            return None
        url = self.base_url + 'term/edit-annotation/{annotation_id}'.format(
            annotation_id = annotation_id
        )
        # Blank single-space fields signal deletion to the edit endpoint.
        data = {
            'tid': ' ', # for delete
            'annotation_tid': ' ', # for delete
            'value': ' ', # for delete
            'term_version': ' ',
            'annotation_term_version': ' ',
        }
        output = self.post(
            url = url,
            data = data,
        )
        # check output
        return output
def get_relationship_via_tid(self, tid: str) -> dict:
        """ Fetch every relationship anchored to the entity with database id *tid*. """
        endpoint = 'term/get-relationships/{tid}?key={api_key}'
        query = endpoint.format(tid=tid, api_key=self.api_key)
        return self.get(self.base_url + query)
def add_relationship(
        self,
        entity1_ilx: str,
        relationship_ilx: str,
        entity2_ilx: str) -> dict:
        """ Adds relationship connection in Interlex

        A relationship exists as 3 different parts:
            1. entity with type term, cde, fde, or pde
            2. entity with type relationship that connects entity1 to entity2
                -> Has its' own meta data, so no value needed
            3. entity with type term, cde, fde, or pde

        Args:
            entity1_ilx: ILX ID of the relationship's subject
            relationship_ilx: ILX ID of the relationship-type entity
            entity2_ilx: ILX ID of the relationship's object

        Returns:
            Server response for the new relationship, or the pre-existing
            relationship record when the triple already exists.
        """
        url = self.base_url + 'term/add-relationship'
        # All three members of the triple must already exist in InterLex.
        entity1_data = self.get_entity(entity1_ilx)
        if not entity1_data['id']:
            # NOTE(review): exit() aborts the interpreter; an exception would
            # be friendlier to library callers — confirm intent.
            exit(
                'entity1_ilx: ' + entity1_ilx + ' does not exist'
            )
        relationship_data = self.get_entity(relationship_ilx)
        if not relationship_data['id']:
            exit(
                'relationship_ilx: ' + relationship_ilx + ' does not exist'
            )
        entity2_data = self.get_entity(entity2_ilx)
        if not entity2_data['id']:
            exit(
                'entity2_ilx: ' + entity2_ilx + ' does not exist'
            )
        data = {
            'term1_id': entity1_data['id'],
            'relationship_tid': relationship_data['id'],
            'term2_id': entity2_data['id'],
            'term1_version': entity1_data['version'],
            'term2_version': entity2_data['version'],
            'relationship_term_version': relationship_data['version'],
            'orig_uid': self.user_id, # BUG: php lacks orig_uid update
        }
        output = self.post(
            url = url,
            data = data,
        )
        ### If already exists, we return the actual relationship properly ###
        if output.get('errormsg'):
            if 'already exists' in output['errormsg'].lower():
                term_relationships = self.get_relationship_via_tid(entity1_data['id'])
                for term_relationship in term_relationships:
                    if str(term_relationship['term2_id']) == str(entity2_data['id']):
                        if term_relationship['relationship_tid'] == relationship_data['id']:
                            print(
                                'relationship: [' + entity1_data['label'] + ' -> ' +
                                relationship_data['label'] + ' -> ' + entity2_data['label'] +
                                '], already exists.'
                            )
                            return term_relationship
                # Server said 'already exists' but no matching record found.
                exit(output)
            # Any other server-side error message is treated as fatal.
            exit(output)
        return output
def delete_relationship(
        self,
        entity1_ilx: str,
        relationship_ilx: str,
        entity2_ilx: str) -> dict:
        """ Deletes the relationship triple (entity1, relationship, entity2) in InterLex.

        There is no hard-delete endpoint; the matching relationship row is
        blanked out via the edit endpoint instead.

        Args:
            entity1_ilx: ILX ID of the relationship's subject
            relationship_ilx: ILX ID of the relationship-type entity
            entity2_ilx: ILX ID of the relationship's object

        Returns:
            Server response dict, or None when no matching relationship exists.
        """
        # Resolve all three members of the triple; bail out if any is missing.
        entity1_data = self.get_entity(entity1_ilx)
        if not entity1_data['id']:
            # BUGFIX: previously concatenated the dict entity1_data into the
            # message, which raised TypeError instead of reporting the ID.
            exit(
                'entity1_ilx: ' + entity1_ilx + ' does not exist'
            )
        relationship_data = self.get_entity(relationship_ilx)
        if not relationship_data['id']:
            exit(
                'relationship_ilx: ' + relationship_ilx + ' does not exist'
            )
        entity2_data = self.get_entity(entity2_ilx)
        if not entity2_data['id']:
            # BUGFIX: same dict-concatenation TypeError as above.
            exit(
                'entity2_ilx: ' + entity2_ilx + ' does not exist'
            )
        # Blank single-space id fields signal deletion to the edit endpoint.
        data = {
            'term1_id': ' ', # for delete
            'relationship_tid': ' ', # for delete
            'term2_id': ' ', # for delete
            'term1_version': entity1_data['version'],
            'term2_version': entity2_data['version'],
            'relationship_term_version': relationship_data['version'],
            'orig_uid': self.user_id, # BUG: php lacks orig_uid update
        }
        # Find the concrete relationship row matching the full triple.
        entity_relationships = self.get_relationship_via_tid(entity1_data['id'])
        relationship_id = None
        for relationship in entity_relationships:
            if str(relationship['term1_id']) == str(entity1_data['id']):
                if str(relationship['term2_id']) == str(entity2_data['id']):
                    if str(relationship['relationship_tid']) == str(relationship_data['id']):
                        relationship_id = relationship['id']
                        break
        if not relationship_id:
            # BUGFIX: message previously said "Annotation" for a relationship.
            print('''WARNING: Relationship you wanted to delete does not exist ''')
            return None
        url = self.base_url + 'term/edit-relationship/{id}'.format(id=relationship_id)
        output = self.post(
            url = url,
            data = data,
        )
        return output
# print(sci.add_relationship(**relationship))
# print(resp)
# print(sci.update_entity(**update_entity_data))
# print(sci.add_raw_entity(entity))
# print(sci.add_entity(**simple_entity))
# print(sci.add_annotation(**annotation))
# print(sci.add_relationship(**relationship))
if __name__ == '__main__':
examples()
|
def process_response(self, response: requests.models.Response) -> dict:
    """ Checks for correct data response and status codes.

    Args:
        response: raw requests response from the SciCrunch API

    Returns:
        The decoded 'data' payload on success (200/201), or the raw JSON
        body on a 400 so callers can inspect 'errormsg'.

    Raises:
        BadResponseError: when the body is not JSON, or the status code is
            anything other than 200, 201, or 400.
    """
    try:
        output = response.json()
    except json.JSONDecodeError:  # Server is having a bad day and crashed.
        raise self.BadResponseError(
            'Json not returned with status code [' + str(response.status_code) + ']')
    # 400s carry an 'errormsg' payload that callers handle themselves.
    if response.status_code == 400:
        return output
    if response.status_code not in [200, 201]:  # Safety catch.
        raise self.BadResponseError(
            str(output) + ': with status code [' + str(response.status_code) +
            '] and params:' + str(output))
    return output['data']
""" Connects to SciCrunch via its' api endpoints
Purpose is to allow external curators to add entities and annotations to those entities.
Functions To Use:
add_entity
add_annotation
Notes On Time Complexity:
Function add_entity, if added an entity successfully, will hit a least 5 endpoints. This
may cause it to take a few seconds to load each entity into SciCrunch. Function
add_annotation is a little more forgiving with it only hitting 3 minimum.
"""
class Error(Exception): pass
class SuperClassDoesNotExistError(Error):
""" The superclass listed does not exist! """
class EntityDoesNotExistError(Error):
""" The entity listed does not exist! """
class BadResponseError(Error): pass
class NoLabelError(Error): pass
class NoTypeError(Error): pass
class MissingKeyError(Error): pass
class IncorrectKeyError(Error): pass
class IncorrectAPIKeyError(Error): pass
default_base_url = 'https://scicrunch.org/api/1/'
ilx_base_url = 'http://uri.interlex.org/base/'
def __init__(self, api_key: str, base_url: str = default_base_url):
self.api_key = api_key
self.base_url = base_url
user_info_url = self.default_base_url + 'user/info?key=' + self.api_key
self.check_api_key(user_info_url)
self.user_id = str(self.get(user_info_url)['id'])
def check_api_key(self, url):
response = requests.get(
url,
headers = {'Content-type': 'application/json'},
auth = ('scicrunch', 'perl22(query)') # for test2.scicrunch.org
)
if response.status_code not in [200, 201]: # Safety catch.
raise self.IncorrectAPIKeyError('api_key given is incorrect.')
def get(self, url: str) -> List[dict]:
""" Requests data from database """
response = requests.get(
url,
headers = {'Content-type': 'application/json'},
auth = ('scicrunch', 'perl22(query)') # for test2.scicrunch.org
)
output = self.process_response(response)
return output
def post(self, url: str, data: List[dict]) -> List[dict]:
""" Gives data to database """
data.update({
'key': self.api_key,
})
response = requests.post(
url,
data = json.dumps(data),
headers = {'Content-type': 'application/json'},
auth = ('scicrunch', 'perl22(query)') # for test2.scicrunch.org
)
output = self.process_response(response)
return output
def fix_ilx(self, ilx_id: str) -> str:
""" Database only excepts lower case and underscore version of ID """
# FIXME probably .rsplit('/', 1) is the more correct version of this
# and because this is nominally a 'private' interface these should be
ilx_id = ilx_id.replace('http://uri.interlex.org/base/', '')
if ilx_id[:4] not in ['TMP:', 'tmp_', 'ILX:', 'ilx_']:
raise ValueError(
'Need to provide ilx ID with format ilx_# or ILX:# for given ID ' + ilx_id)
return ilx_id.replace('ILX:', 'ilx_').replace('TMP:', 'tmp_')
def process_superclass(self, entity: List[dict]) -> List[dict]:
""" Replaces ILX ID with superclass ID """
superclass = entity.pop('superclass')
label = entity['label']
if not superclass.get('ilx_id'):
raise self.SuperClassDoesNotExistError(
f'Superclass not given an interlex ID for label: {label}')
superclass_data = self.get_entity(superclass['ilx_id'])
if not superclass_data['id']:
raise self.SuperClassDoesNotExistError(
'Superclass ILX ID: ' + superclass['ilx_id'] + ' does not exist in SciCrunch')
# BUG: only excepts superclass_tid
entity['superclasses'] = [{'superclass_tid': superclass_data['id']}]
return entity
def process_synonyms(self, entity: List[dict]) -> List[dict]:
""" Making sure key/value is in proper format for synonyms in entity """
label = entity['label']
for synonym in entity['synonyms']:
# these are internal errors and users should never see them
if 'literal' not in synonym:
raise ValueError(f'Synonym not given a literal for label: {label}')
elif len(synonym) > 1:
raise ValueError(f'Too many keys in synonym for label: {label}')
return entity
def process_existing_ids(self, entity: List[dict]) -> List[dict]:
""" Making sure key/value is in proper format for existing_ids in entity """
label = entity['label']
existing_ids = entity['existing_ids']
for existing_id in existing_ids:
if 'curie' not in existing_id or 'iri' not in existing_id:
raise ValueError(
f'Missing needing key(s) in existing_ids for label: {label}')
elif len(existing_id) > 2:
raise ValueError(
f'Extra keys not recognized in existing_ids for label: {label}')
return entity
def crude_search_scicrunch_via_label(self, label:str) -> dict:
""" Server returns anything that is simlar in any catagory """
url = self.base_url + 'term/search/{term}?key={api_key}'.format(
term = label,
api_key = self.api_key,
)
return self.get(url)
def check_scicrunch_for_label(self, label: str) -> dict:
""" Sees if label with your user ID already exists
There are can be multiples of the same label in interlex, but there should only be one
label with your user id. Therefore you can create labels if there already techniqually
exist, but not if you are the one to create it.
"""
list_of_crude_matches = self.crude_search_scicrunch_via_label(label)
for crude_match in list_of_crude_matches:
# If labels match
if crude_match['label'].lower().strip() == label.lower().strip():
complete_data_of_crude_match = self.get_entity(crude_match['ilx'])
crude_match_label = crude_match['label']
crude_match_user_id = complete_data_of_crude_match['uid']
# If label was created by you
if str(self.user_id) == str(crude_match_user_id):
return complete_data_of_crude_match # You created the entity already
# No label AND user id match
return {}
def get_entity(self, ilx_id: str) -> dict:
""" Gets full meta data (expect their annotations and relationships) from is ILX ID """
ilx_id = self.fix_ilx(ilx_id)
url = self.base_url + "ilx/search/identifier/{identifier}?key={api_key}".format(
identifier = ilx_id,
api_key = self.api_key,
)
return self.get(url)
def add_entity(
self,
label: str,
type: str,
definition: str = None,
comment: str = None,
superclass: str = None,
synonyms: list = None) -> dict:
template_entity_input = {k:v for k, v in locals().items() if k != 'self' and v}
if template_entity_input.get('superclass'):
template_entity_input['superclass'] = self.fix_ilx(template_entity_input['superclass'])
if not label:
raise self.NoLabelError('Entity needs a label')
if not type:
raise self.NoTypeError('Entity needs a type')
entity_input = {
'label': label,
'type': type,
}
if definition:
entity_input['definition'] = definition
if comment:
entity_input['comment'] = comment
if superclass:
entity_input['superclass'] = {'ilx_id':self.fix_ilx(superclass)}
if synonyms:
entity_input['synonyms'] = [{'literal': syn} for syn in synonyms]
raw_entity_outout = self.add_raw_entity(entity_input)
# Sanity check -> output same as input, but filled with response data
entity_output = {}
ics = [(e['iri'], e['curie'])
for e in raw_entity_outout['existing_ids']]
entity_output['iri'], entity_output['curie'] = sorted((i, c)
for i, c in ics
if 'ilx_' in i)[0]
### FOR NEW BETA. Old can have 'ilx_' in the ids ###
if 'tmp' in raw_entity_outout['ilx']:
_id = raw_entity_outout['ilx'].split('_')[-1]
entity_output['iri'] = 'http://uri.interlex.org/base/tmp_' + _id
entity_output['curie'] = 'TMP:' + _id
for key, value in template_entity_input.items():
if key == 'superclass':
entity_output[key] = raw_entity_outout['superclasses'][0]['ilx']
elif key == 'synonyms':
entity_output[key] = [syn['literal']
for syn in raw_entity_outout['synonyms']]
else:
entity_output[key] = str(raw_entity_outout[key])
# skip this for now, I check it downstream be cause I'm paranoid, but in this client it is
# safe to assume that the value given will be the value returned if there is a return at all
# it also isn't that they match exactly, because some new values (e.g. iri and curie) are expected
#if entity_output != template_entity_input:
# DEBUG: helps see what's wrong; might want to make a clean version of this
# for key, value in template_entity_input.items():
# if template_entity_input[key] != entity_output[key]:
# print(template_entity_input[key], entity_output[key])
#raise self.BadResponseError('The server did not return proper data!')
if entity_output.get('superclass'):
entity_output['superclass'] = self.ilx_base_url + entity_output['superclass']
entity_output['ilx'] = self.ilx_base_url + raw_entity_outout['ilx']
return entity_output
def add_raw_entity(self, entity: dict) -> dict:
""" Adds entity if it does not already exist under your user ID.
Need to provide a list of dictionaries that have at least the key/values
for label and type. If given a key, the values provided must be in the
format shown in order for the server to except them. You can input
multiple synonyms, or existing_ids.
Entity type can be any of the following: term, pde, fde, cde, annotation, or relationship
Options Template:
entity = {
'label': '',
'type': '',
'definition': '',
'comment': '',
'superclass': {
'ilx_id': ''
},
'synonyms': [
{
'literal': ''
},
],
'existing_ids': [
{
'iri': '',
'curie': '',
},
],
}
Minimum Needed:
entity = {
'label': '',
'type': '',
}
Example:
entity = {
'label': 'brain',
'type': 'pde',
'definition': 'Part of the central nervous system',
'comment': 'Cannot live without it',
'superclass': {
'ilx_id': 'ilx_0108124', # ILX ID for Organ
},
'synonyms': [
{
'literal': 'Encephalon'
},
{
'literal': 'Cerebro'
},
],
'existing_ids': [
{
'iri': 'http://uri.neuinfo.org/nif/nifstd/birnlex_796',
'curie': 'BIRNLEX:796',
},
],
}
"""
needed_in_entity = set([
'label',
'type',
])
options_in_entity = set([
'label',
'type',
'definition',
'comment',
'superclass',
'synonyms',
'existing_ids'
])
prime_entity_url = self.base_url + 'ilx/add'
add_entity_url = self.base_url + 'term/add'
### Checking if key/value format is correct ###
# Seeing if you are missing a needed key
if (set(entity) & needed_in_entity) != needed_in_entity:
raise self.MissingKeyError(
'You need key(s): '+ str(needed_in_entity - set(entity)))
# Seeing if you have other options not included in the description
elif (set(entity) | options_in_entity) != options_in_entity:
raise self.IncorrectKeyError(
'Unexpected key(s): ' + str(set(entity) - options_in_entity))
entity['type'] = entity['type'].lower() # BUG: server only takes lowercase
if entity['type'] not in ['term', 'relationship', 'annotation', 'cde', 'fde', 'pde']:
raise TypeError(
'Entity should be one of the following: ' +
'term, relationship, annotation, cde, fde, pde')
if entity.get('superclass'):
entity = self.process_superclass(entity)
if entity.get('synonyms'):
entity = self.process_synonyms(entity)
if entity.get('existing_ids'):
entity = self.process_existing_ids(entity)
entity['uid'] = self.user_id # BUG: php lacks uid update
### Adding entity to SciCrunch ###
entity['term'] = entity.pop('label') # ilx/add nuance
ilx_data = self.post(
url = prime_entity_url,
data = entity.copy(),
) # requesting spot in server for entity
if ilx_data.get('ilx'):
ilx_id = ilx_data['ilx']
else:
ilx_id = ilx_data['fragment'] # beta.scicrunch.org
entity['label'] = entity.pop('term') # term/add nuance
entity['ilx'] = ilx_id # need entity ilx_id to place entity in db
output = self.post(
url = add_entity_url,
data = entity.copy(),
) # data represented in SciCrunch interface
### Checking if label already exisits ###
if output.get('errormsg'):
if 'already exists' in output['errormsg'].lower():
prexisting_data = self.check_scicrunch_for_label(entity['label'])
if prexisting_data:
print(
'You already added entity', entity['label'],
'with ILX ID:', prexisting_data['ilx'])
return prexisting_data
self.Error(output) # FIXME what is the correct error here?
self.Error(output) # FIXME what is the correct error here?
# BUG: server output incomplete compared to search via ilx ids
output = self.get_entity(output['ilx'])
return output
def update_entity(
self,
ilx_id: str,
label: str = None,
type: str = None,
definition: str = None,
comment: str = None,
superclass: str = None,
synonyms: list = None) -> dict:
""" Updates pre-existing entity as long as the api_key is from the account that created it
Args:
label: name of entity
type: entities type
Can be any of the following: term, cde, fde, pde, annotation, relationship
definition: entities definition
comment: a foot note regarding either the interpretation of the data or the data itself
superclass: entity is a sub-part of this entity
Example: Organ is a superclass to Brain
synonyms: entity synonyms
Returns:
Server response that is a nested dictionary format
"""
template_entity_input = {k:v for k, v in locals().copy().items() if k != 'self'}
if template_entity_input.get('superclass'):
template_entity_input['superclass'] = self.fix_ilx(template_entity_input['superclass'])
existing_entity = self.get_entity(ilx_id=ilx_id)
if not existing_entity['id']: # TODO: Need to make a proper ilx_id check error
raise self.EntityDoesNotExistError(
f'ilx_id provided {ilx_id} does not exist')
update_url = self.base_url + 'term/edit/{id}'.format(id=existing_entity['id'])
if label:
existing_entity['label'] = label
if type:
existing_entity['type'] = type
if definition:
existing_entity['definition'] = definition
if comment:
existing_entity['comment'] = comment
if superclass:
existing_entity['superclass'] = {'ilx_id': superclass}
existing_entity = self.process_superclass(existing_entity)
# If a match use old data, else append new synonym
if synonyms:
if existing_entity['synonyms']:
new_existing_synonyms = []
existing_synonyms = {syn['literal'].lower():syn for syn in existing_entity['synonyms']}
for synonym in synonyms:
existing_synonym = existing_synonyms.get(synonym.lower())
if not existing_synonym:
new_existing_synonyms.append({'literal': synonym})
else:
new_existing_synonyms.append(existing_synonym)
existing_entity['synonyms'] = new_existing_synonyms
# Just in case I need this...
# if synonyms_to_delete:
# if existing_entity['synonyms']:
# remaining_existing_synonyms = []
# existing_synonyms = {syn['literal'].lower():syn for syn in existing_entity['synonyms']}
# for synonym in synonyms:
# if existing_synonyms.get(synonym.lower()):
# existing_synonyms.pop(synonym.lower())
# else:
# print('WARNING: synonym you wanted to delete', synonym, 'does not exist')
# existing_entity['synonyms'] = list(existing_synonyms.values())
response = self.post(
url = update_url,
data = existing_entity,
)
# BUG: server response is bad and needs to actually search again to get proper format
raw_entity_outout = self.get_entity(response['ilx'])
entity_output = {}
ics = [(e['iri'], e['curie'])
for e in raw_entity_outout['existing_ids']]
entity_output['iri'], entity_output['curie'] = sorted((i, c)
for i, c in ics
if 'ilx_' in i)[0]
### FOR NEW BETA. Old can have 'ilx_' in the ids ###
if 'tmp' in raw_entity_outout['ilx']:
_id = raw_entity_outout['ilx'].split('_')[-1]
entity_output['iri'] = 'http://uri.interlex.org/base/tmp_' + _id
entity_output['curie'] = 'TMP:' + _id
print(template_entity_input)
for key, value in template_entity_input.items():
if key == 'superclass':
if raw_entity_outout.get('superclasses'):
entity_output[key] = raw_entity_outout['superclasses'][0]['ilx']
elif key == 'synonyms':
entity_output[key] = [syn['literal']
for syn in raw_entity_outout['synonyms']]
elif key == 'ilx_id':
pass
else:
entity_output[key] = str(raw_entity_outout[key])
if entity_output.get('superclass'):
entity_output['superclass'] = self.ilx_base_url + entity_output['superclass']
entity_output['ilx'] = self.ilx_base_url + raw_entity_outout['ilx']
return entity_output
def get_annotation_via_tid(self, tid: str) -> dict:
""" Gets annotation via anchored entity id """
url = self.base_url + 'term/get-annotations/{tid}?key={api_key}'.format(
tid = tid,
api_key = self.api_key,
)
return self.get(url)
def add_annotation(
self,
term_ilx_id: str,
annotation_type_ilx_id: str,
annotation_value: str) -> dict:
""" Adding an annotation value to a prexisting entity
An annotation exists as 3 different parts:
1. entity with type term, cde, fde, or pde
2. entity with type annotation
3. string value of the annotation
Example:
annotation = {
'term_ilx_id': 'ilx_0101431', # brain ILX ID
'annotation_type_ilx_id': 'ilx_0381360', # hasDbXref ILX ID
'annotation_value': 'http://neurolex.org/wiki/birnlex_796',
}
"""
url = self.base_url + 'term/add-annotation'
term_data = self.get_entity(term_ilx_id)
if not term_data['id']:
exit(
'term_ilx_id: ' + term_ilx_id + ' does not exist'
)
anno_data = self.get_entity(annotation_type_ilx_id)
if not anno_data['id']:
exit(
'annotation_type_ilx_id: ' + annotation_type_ilx_id +
' does not exist'
)
data = {
'tid': term_data['id'],
'annotation_tid': anno_data['id'],
'value': annotation_value,
'term_version': term_data['version'],
'annotation_term_version': anno_data['version'],
'orig_uid': self.user_id, # BUG: php lacks orig_uid update
}
output = self.post(
url = url,
data = data,
)
### If already exists, we return the actual annotation properly ###
if output.get('errormsg'):
if 'already exists' in output['errormsg'].lower():
term_annotations = self.get_annotation_via_tid(term_data['id'])
for term_annotation in term_annotations:
if str(term_annotation['annotation_tid']) == str(anno_data['id']):
if term_annotation['value'] == data['value']:
print(
'Annotation: [' + term_data['label'] + ' -> ' + anno_data['label'] +
' -> ' + data['value'] + '], already exists.'
)
return term_annotation
exit(output)
exit(output)
return output
def delete_annotation(
self,
term_ilx_id: str,
annotation_type_ilx_id: str,
annotation_value: str) -> dict:
""" If annotation doesnt exist, add it
"""
term_data = self.get_entity(term_ilx_id)
if not term_data['id']:
exit(
'term_ilx_id: ' + term_ilx_id + ' does not exist'
)
anno_data = self.get_entity(annotation_type_ilx_id)
if not anno_data['id']:
exit(
'annotation_type_ilx_id: ' + annotation_type_ilx_id +
' does not exist'
)
entity_annotations = self.get_annotation_via_tid(term_data['id'])
annotation_id = ''
for annotation in entity_annotations:
if str(annotation['tid']) == str(term_data['id']):
if str(annotation['annotation_tid']) == str(anno_data['id']):
if str(annotation['value']) == str(annotation_value):
annotation_id = annotation['id']
break
if not annotation_id:
print('''WARNING: Annotation you wanted to delete does not exist ''')
return None
url = self.base_url + 'term/edit-annotation/{annotation_id}'.format(
annotation_id = annotation_id
)
data = {
'tid': ' ', # for delete
'annotation_tid': ' ', # for delete
'value': ' ', # for delete
'term_version': ' ',
'annotation_term_version': ' ',
}
output = self.post(
url = url,
data = data,
)
# check output
return output
def get_relationship_via_tid(self, tid: str) -> dict:
url = self.base_url + 'term/get-relationships/{tid}?key={api_key}'.format(
tid = tid,
api_key = self.api_key,
)
return self.get(url)
def add_relationship(
self,
entity1_ilx: str,
relationship_ilx: str,
entity2_ilx: str) -> dict:
""" Adds relationship connection in Interlex
A relationship exists as 3 different parts:
1. entity with type term, cde, fde, or pde
2. entity with type relationship that connects entity1 to entity2
-> Has its' own meta data, so no value needed
3. entity with type term, cde, fde, or pde
"""
url = self.base_url + 'term/add-relationship'
entity1_data = self.get_entity(entity1_ilx)
if not entity1_data['id']:
exit(
'entity1_ilx: ' + entity1_ilx + ' does not exist'
)
relationship_data = self.get_entity(relationship_ilx)
if not relationship_data['id']:
exit(
'relationship_ilx: ' + relationship_ilx + ' does not exist'
)
entity2_data = self.get_entity(entity2_ilx)
if not entity2_data['id']:
exit(
'entity2_ilx: ' + entity2_ilx + ' does not exist'
)
data = {
'term1_id': entity1_data['id'],
'relationship_tid': relationship_data['id'],
'term2_id': entity2_data['id'],
'term1_version': entity1_data['version'],
'term2_version': entity2_data['version'],
'relationship_term_version': relationship_data['version'],
'orig_uid': self.user_id, # BUG: php lacks orig_uid update
}
output = self.post(
url = url,
data = data,
)
### If already exists, we return the actual relationship properly ###
if output.get('errormsg'):
if 'already exists' in output['errormsg'].lower():
term_relationships = self.get_relationship_via_tid(entity1_data['id'])
for term_relationship in term_relationships:
if str(term_relationship['term2_id']) == str(entity2_data['id']):
if term_relationship['relationship_tid'] == relationship_data['id']:
print(
'relationship: [' + entity1_data['label'] + ' -> ' +
relationship_data['label'] + ' -> ' + entity2_data['label'] +
'], already exists.'
)
return term_relationship
exit(output)
exit(output)
return output
def delete_relationship(
    self,
    entity1_ilx: str,
    relationship_ilx: str,
    entity2_ilx: str) -> dict:
    """ Deletes a relationship connection in Interlex.

    A relationship exists as 3 different parts:
        1. entity with type term, cde, fde, or pde
        2. entity with type relationship that connects entity1 to entity2
           -> Has its own meta data, so no value needed
        3. entity with type term, cde, fde, or pde

    :param entity1_ilx: ILX ID of the subject entity
    :param relationship_ilx: ILX ID of the relationship-type entity
    :param entity2_ilx: ILX ID of the object entity
    :return: server response dict, or None when no matching relationship exists
    """
    entity1_data = self.get_entity(entity1_ilx)
    if not entity1_data['id']:
        # BUGFIX: previously concatenated the entity1_data dict into the
        # message, raising TypeError instead of exiting with the message.
        exit(
            'entity1_ilx: ' + entity1_ilx + ' does not exist'
        )
    relationship_data = self.get_entity(relationship_ilx)
    if not relationship_data['id']:
        exit(
            'relationship_ilx: ' + relationship_ilx + ' does not exist'
        )
    entity2_data = self.get_entity(entity2_ilx)
    if not entity2_data['id']:
        # BUGFIX: same dict-concatenation TypeError as entity1 above.
        exit(
            'entity2_ilx: ' + entity2_ilx + ' does not exist'
        )
    # Blank id fields signal a delete to the server's edit endpoint.
    data = {
        'term1_id': ' ',  # entity1_data['id'],
        'relationship_tid': ' ',  # relationship_data['id'],
        'term2_id': ' ',  # entity2_data['id'],
        'term1_version': entity1_data['version'],
        'term2_version': entity2_data['version'],
        'relationship_term_version': relationship_data['version'],
        'orig_uid': self.user_id,  # BUG: php lacks orig_uid update
    }
    entity_relationships = self.get_relationship_via_tid(entity1_data['id'])
    # Locate the server-side id of the exact triple we were asked to delete.
    relationship_id = None
    for relationship in entity_relationships:
        if str(relationship['term1_id']) == str(entity1_data['id']):
            if str(relationship['term2_id']) == str(entity2_data['id']):
                if str(relationship['relationship_tid']) == str(relationship_data['id']):
                    relationship_id = relationship['id']
                    break
    if not relationship_id:
        # BUGFIX: message said "Annotation" (copy-paste from delete_annotation).
        print('''WARNING: Relationship you wanted to delete does not exist ''')
        return None
    url = self.base_url + 'term/edit-relationship/{id}'.format(id=relationship_id)
    output = self.post(
        url=url,
        data=data,
    )
    return output
|
tgbugs/ontquery | ontquery/plugins/interlex_client.py | InterLexClient.get | python | def get(self, url: str) -> List[dict]:
response = requests.get(
url,
headers = {'Content-type': 'application/json'},
auth = ('scicrunch', 'perl22(query)') # for test2.scicrunch.org
)
output = self.process_response(response)
return output | Requests data from database | train | https://github.com/tgbugs/ontquery/blob/bcf4863cb2bf221afe2b093c5dc7da1377300041/ontquery/plugins/interlex_client.py#L81-L89 | [
"def process_response(self, response: requests.models.Response) -> dict:\n \"\"\" Checks for correct data response and status codes \"\"\"\n try:\n output = response.json()\n except json.JSONDecodeError: # Server is having a bad day and crashed.\n raise self.BadResponseError(\n 'Json not returned with status code [' + str(response.status_code) + ']')\n\n if response.status_code == 400:\n return output\n\n if response.status_code not in [200, 201]: # Safety catch.\n raise self.BadResponseError(\n str(output) + ': with status code [' + str(response.status_code) +\n '] and params:' + str(output))\n\n return output['data']\n"
] | class InterLexClient:
""" Connects to SciCrunch via its' api endpoints
Purpose is to allow external curators to add entities and annotations to those entities.
Functions To Use:
add_entity
add_annotation
Notes On Time Complexity:
Function add_entity, if added an entity successfully, will hit a least 5 endpoints. This
may cause it to take a few seconds to load each entity into SciCrunch. Function
add_annotation is a little more forgiving with it only hitting 3 minimum.
"""
class Error(Exception): pass
class SuperClassDoesNotExistError(Error):
""" The superclass listed does not exist! """
class EntityDoesNotExistError(Error):
""" The entity listed does not exist! """
class BadResponseError(Error): pass
class NoLabelError(Error): pass
class NoTypeError(Error): pass
class MissingKeyError(Error): pass
class IncorrectKeyError(Error): pass
class IncorrectAPIKeyError(Error): pass
default_base_url = 'https://scicrunch.org/api/1/'
ilx_base_url = 'http://uri.interlex.org/base/'
def __init__(self, api_key: str, base_url: str = default_base_url):
self.api_key = api_key
self.base_url = base_url
user_info_url = self.default_base_url + 'user/info?key=' + self.api_key
self.check_api_key(user_info_url)
self.user_id = str(self.get(user_info_url)['id'])
def check_api_key(self, url):
response = requests.get(
url,
headers = {'Content-type': 'application/json'},
auth = ('scicrunch', 'perl22(query)') # for test2.scicrunch.org
)
if response.status_code not in [200, 201]: # Safety catch.
raise self.IncorrectAPIKeyError('api_key given is incorrect.')
def process_response(self, response: requests.models.Response) -> dict:
    """ Decode a server response, raising BadResponseError on malformed
    JSON or unexpected status codes; returns the payload's 'data' field. """
    code = response.status_code
    try:
        payload = response.json()
    except json.JSONDecodeError:  # Server is having a bad day and crashed.
        raise self.BadResponseError(
            'Json not returned with status code [' + str(code) + ']')
    if code == 400:
        # A 400 carries an error body that callers inspect themselves.
        return payload
    if code not in (200, 201):  # Safety catch.
        raise self.BadResponseError(
            str(payload) + ': with status code [' + str(code) +
            '] and params:' + str(payload))
    return payload['data']
def post(self, url: str, data: List[dict]) -> List[dict]:
""" Gives data to database """
data.update({
'key': self.api_key,
})
response = requests.post(
url,
data = json.dumps(data),
headers = {'Content-type': 'application/json'},
auth = ('scicrunch', 'perl22(query)') # for test2.scicrunch.org
)
output = self.process_response(response)
return output
def fix_ilx(self, ilx_id: str) -> str:
    """ Normalize an ILX identifier to the lower-case, underscored form
    (ilx_# / tmp_#) that the database endpoints accept.

    :raises ValueError: if the id does not carry a recognized prefix
    """
    # FIXME probably .rsplit('/', 1) is the more correct version of this
    # Strip a full InterLex IRI down to its fragment first.
    fragment = ilx_id.replace('http://uri.interlex.org/base/', '')
    if not fragment.startswith(('TMP:', 'tmp_', 'ILX:', 'ilx_')):
        raise ValueError(
            'Need to provide ilx ID with format ilx_# or ILX:# for given ID ' + fragment)
    return fragment.replace('ILX:', 'ilx_').replace('TMP:', 'tmp_')
def process_superclass(self, entity: List[dict]) -> List[dict]:
""" Replaces ILX ID with superclass ID """
superclass = entity.pop('superclass')
label = entity['label']
if not superclass.get('ilx_id'):
raise self.SuperClassDoesNotExistError(
f'Superclass not given an interlex ID for label: {label}')
superclass_data = self.get_entity(superclass['ilx_id'])
if not superclass_data['id']:
raise self.SuperClassDoesNotExistError(
'Superclass ILX ID: ' + superclass['ilx_id'] + ' does not exist in SciCrunch')
# BUG: only excepts superclass_tid
entity['superclasses'] = [{'superclass_tid': superclass_data['id']}]
return entity
def process_synonyms(self, entity: List[dict]) -> List[dict]:
    """ Validate that every synonym on *entity* is a single-key
    {'literal': ...} dict; returns the entity unchanged. """
    label = entity['label']
    for candidate in entity['synonyms']:
        # these are internal errors and users should never see them
        if 'literal' not in candidate:
            raise ValueError(f'Synonym not given a literal for label: {label}')
        if len(candidate) > 1:
            raise ValueError(f'Too many keys in synonym for label: {label}')
    return entity
def process_existing_ids(self, entity: List[dict]) -> List[dict]:
    """ Validate that each existing_ids entry is exactly
    {'iri': ..., 'curie': ...}; returns the entity unchanged. """
    label = entity['label']
    for record in entity['existing_ids']:
        if not {'curie', 'iri'}.issubset(record):
            raise ValueError(
                f'Missing needing key(s) in existing_ids for label: {label}')
        if len(record) > 2:
            raise ValueError(
                f'Extra keys not recognized in existing_ids for label: {label}')
    return entity
def crude_search_scicrunch_via_label(self, label: str) -> dict:
    """ Loose server-side search: returns anything similar in any category. """
    endpoint = 'term/search/{term}?key={api_key}'.format(
        term=label,
        api_key=self.api_key,
    )
    return self.get(self.base_url + endpoint)
def check_scicrunch_for_label(self, label: str) -> dict:
    """ Sees if an entity with this label created by your user ID already exists.

    There can be multiples of the same label in InterLex, but there should
    only be one per user id. Therefore you can create an entity whose label
    already exists, just not if you were the one who created it.

    :param label: entity label to look for
    :return: full entity record if you already created it, otherwise {}
    """
    list_of_crude_matches = self.crude_search_scicrunch_via_label(label)
    # Hoisted out of the loop: the target label only needs normalizing once.
    normalized_label = label.lower().strip()
    for crude_match in list_of_crude_matches:
        # If labels match
        if crude_match['label'].lower().strip() == normalized_label:
            complete_data_of_crude_match = self.get_entity(crude_match['ilx'])
            # NOTE: dropped unused local crude_match_label
            crude_match_user_id = complete_data_of_crude_match['uid']
            # If label was created by you
            if str(self.user_id) == str(crude_match_user_id):
                return complete_data_of_crude_match  # You created the entity already
    # No label AND user id match
    return {}
def get_entity(self, ilx_id: str) -> dict:
    """ Fetch the full metadata record (minus annotations and
    relationships) for the entity identified by its ILX ID. """
    identifier = self.fix_ilx(ilx_id)
    endpoint = "ilx/search/identifier/{identifier}?key={api_key}".format(
        identifier=identifier,
        api_key=self.api_key,
    )
    return self.get(self.base_url + endpoint)
def add_entity(
self,
label: str,
type: str,
definition: str = None,
comment: str = None,
superclass: str = None,
synonyms: list = None) -> dict:
template_entity_input = {k:v for k, v in locals().items() if k != 'self' and v}
if template_entity_input.get('superclass'):
template_entity_input['superclass'] = self.fix_ilx(template_entity_input['superclass'])
if not label:
raise self.NoLabelError('Entity needs a label')
if not type:
raise self.NoTypeError('Entity needs a type')
entity_input = {
'label': label,
'type': type,
}
if definition:
entity_input['definition'] = definition
if comment:
entity_input['comment'] = comment
if superclass:
entity_input['superclass'] = {'ilx_id':self.fix_ilx(superclass)}
if synonyms:
entity_input['synonyms'] = [{'literal': syn} for syn in synonyms]
raw_entity_outout = self.add_raw_entity(entity_input)
# Sanity check -> output same as input, but filled with response data
entity_output = {}
ics = [(e['iri'], e['curie'])
for e in raw_entity_outout['existing_ids']]
entity_output['iri'], entity_output['curie'] = sorted((i, c)
for i, c in ics
if 'ilx_' in i)[0]
### FOR NEW BETA. Old can have 'ilx_' in the ids ###
if 'tmp' in raw_entity_outout['ilx']:
_id = raw_entity_outout['ilx'].split('_')[-1]
entity_output['iri'] = 'http://uri.interlex.org/base/tmp_' + _id
entity_output['curie'] = 'TMP:' + _id
for key, value in template_entity_input.items():
if key == 'superclass':
entity_output[key] = raw_entity_outout['superclasses'][0]['ilx']
elif key == 'synonyms':
entity_output[key] = [syn['literal']
for syn in raw_entity_outout['synonyms']]
else:
entity_output[key] = str(raw_entity_outout[key])
# skip this for now, I check it downstream be cause I'm paranoid, but in this client it is
# safe to assume that the value given will be the value returned if there is a return at all
# it also isn't that they match exactly, because some new values (e.g. iri and curie) are expected
#if entity_output != template_entity_input:
# DEBUG: helps see what's wrong; might want to make a clean version of this
# for key, value in template_entity_input.items():
# if template_entity_input[key] != entity_output[key]:
# print(template_entity_input[key], entity_output[key])
#raise self.BadResponseError('The server did not return proper data!')
if entity_output.get('superclass'):
entity_output['superclass'] = self.ilx_base_url + entity_output['superclass']
entity_output['ilx'] = self.ilx_base_url + raw_entity_outout['ilx']
return entity_output
def add_raw_entity(self, entity: dict) -> dict:
""" Adds entity if it does not already exist under your user ID.
Need to provide a list of dictionaries that have at least the key/values
for label and type. If given a key, the values provided must be in the
format shown in order for the server to except them. You can input
multiple synonyms, or existing_ids.
Entity type can be any of the following: term, pde, fde, cde, annotation, or relationship
Options Template:
entity = {
'label': '',
'type': '',
'definition': '',
'comment': '',
'superclass': {
'ilx_id': ''
},
'synonyms': [
{
'literal': ''
},
],
'existing_ids': [
{
'iri': '',
'curie': '',
},
],
}
Minimum Needed:
entity = {
'label': '',
'type': '',
}
Example:
entity = {
'label': 'brain',
'type': 'pde',
'definition': 'Part of the central nervous system',
'comment': 'Cannot live without it',
'superclass': {
'ilx_id': 'ilx_0108124', # ILX ID for Organ
},
'synonyms': [
{
'literal': 'Encephalon'
},
{
'literal': 'Cerebro'
},
],
'existing_ids': [
{
'iri': 'http://uri.neuinfo.org/nif/nifstd/birnlex_796',
'curie': 'BIRNLEX:796',
},
],
}
"""
needed_in_entity = set([
'label',
'type',
])
options_in_entity = set([
'label',
'type',
'definition',
'comment',
'superclass',
'synonyms',
'existing_ids'
])
prime_entity_url = self.base_url + 'ilx/add'
add_entity_url = self.base_url + 'term/add'
### Checking if key/value format is correct ###
# Seeing if you are missing a needed key
if (set(entity) & needed_in_entity) != needed_in_entity:
raise self.MissingKeyError(
'You need key(s): '+ str(needed_in_entity - set(entity)))
# Seeing if you have other options not included in the description
elif (set(entity) | options_in_entity) != options_in_entity:
raise self.IncorrectKeyError(
'Unexpected key(s): ' + str(set(entity) - options_in_entity))
entity['type'] = entity['type'].lower() # BUG: server only takes lowercase
if entity['type'] not in ['term', 'relationship', 'annotation', 'cde', 'fde', 'pde']:
raise TypeError(
'Entity should be one of the following: ' +
'term, relationship, annotation, cde, fde, pde')
if entity.get('superclass'):
entity = self.process_superclass(entity)
if entity.get('synonyms'):
entity = self.process_synonyms(entity)
if entity.get('existing_ids'):
entity = self.process_existing_ids(entity)
entity['uid'] = self.user_id # BUG: php lacks uid update
### Adding entity to SciCrunch ###
entity['term'] = entity.pop('label') # ilx/add nuance
ilx_data = self.post(
url = prime_entity_url,
data = entity.copy(),
) # requesting spot in server for entity
if ilx_data.get('ilx'):
ilx_id = ilx_data['ilx']
else:
ilx_id = ilx_data['fragment'] # beta.scicrunch.org
entity['label'] = entity.pop('term') # term/add nuance
entity['ilx'] = ilx_id # need entity ilx_id to place entity in db
output = self.post(
url = add_entity_url,
data = entity.copy(),
) # data represented in SciCrunch interface
### Checking if label already exisits ###
if output.get('errormsg'):
if 'already exists' in output['errormsg'].lower():
prexisting_data = self.check_scicrunch_for_label(entity['label'])
if prexisting_data:
print(
'You already added entity', entity['label'],
'with ILX ID:', prexisting_data['ilx'])
return prexisting_data
self.Error(output) # FIXME what is the correct error here?
self.Error(output) # FIXME what is the correct error here?
# BUG: server output incomplete compared to search via ilx ids
output = self.get_entity(output['ilx'])
return output
def update_entity(
self,
ilx_id: str,
label: str = None,
type: str = None,
definition: str = None,
comment: str = None,
superclass: str = None,
synonyms: list = None) -> dict:
""" Updates pre-existing entity as long as the api_key is from the account that created it
Args:
label: name of entity
type: entities type
Can be any of the following: term, cde, fde, pde, annotation, relationship
definition: entities definition
comment: a foot note regarding either the interpretation of the data or the data itself
superclass: entity is a sub-part of this entity
Example: Organ is a superclass to Brain
synonyms: entity synonyms
Returns:
Server response that is a nested dictionary format
"""
template_entity_input = {k:v for k, v in locals().copy().items() if k != 'self'}
if template_entity_input.get('superclass'):
template_entity_input['superclass'] = self.fix_ilx(template_entity_input['superclass'])
existing_entity = self.get_entity(ilx_id=ilx_id)
if not existing_entity['id']: # TODO: Need to make a proper ilx_id check error
raise self.EntityDoesNotExistError(
f'ilx_id provided {ilx_id} does not exist')
update_url = self.base_url + 'term/edit/{id}'.format(id=existing_entity['id'])
if label:
existing_entity['label'] = label
if type:
existing_entity['type'] = type
if definition:
existing_entity['definition'] = definition
if comment:
existing_entity['comment'] = comment
if superclass:
existing_entity['superclass'] = {'ilx_id': superclass}
existing_entity = self.process_superclass(existing_entity)
# If a match use old data, else append new synonym
if synonyms:
if existing_entity['synonyms']:
new_existing_synonyms = []
existing_synonyms = {syn['literal'].lower():syn for syn in existing_entity['synonyms']}
for synonym in synonyms:
existing_synonym = existing_synonyms.get(synonym.lower())
if not existing_synonym:
new_existing_synonyms.append({'literal': synonym})
else:
new_existing_synonyms.append(existing_synonym)
existing_entity['synonyms'] = new_existing_synonyms
# Just in case I need this...
# if synonyms_to_delete:
# if existing_entity['synonyms']:
# remaining_existing_synonyms = []
# existing_synonyms = {syn['literal'].lower():syn for syn in existing_entity['synonyms']}
# for synonym in synonyms:
# if existing_synonyms.get(synonym.lower()):
# existing_synonyms.pop(synonym.lower())
# else:
# print('WARNING: synonym you wanted to delete', synonym, 'does not exist')
# existing_entity['synonyms'] = list(existing_synonyms.values())
response = self.post(
url = update_url,
data = existing_entity,
)
# BUG: server response is bad and needs to actually search again to get proper format
raw_entity_outout = self.get_entity(response['ilx'])
entity_output = {}
ics = [(e['iri'], e['curie'])
for e in raw_entity_outout['existing_ids']]
entity_output['iri'], entity_output['curie'] = sorted((i, c)
for i, c in ics
if 'ilx_' in i)[0]
### FOR NEW BETA. Old can have 'ilx_' in the ids ###
if 'tmp' in raw_entity_outout['ilx']:
_id = raw_entity_outout['ilx'].split('_')[-1]
entity_output['iri'] = 'http://uri.interlex.org/base/tmp_' + _id
entity_output['curie'] = 'TMP:' + _id
print(template_entity_input)
for key, value in template_entity_input.items():
if key == 'superclass':
if raw_entity_outout.get('superclasses'):
entity_output[key] = raw_entity_outout['superclasses'][0]['ilx']
elif key == 'synonyms':
entity_output[key] = [syn['literal']
for syn in raw_entity_outout['synonyms']]
elif key == 'ilx_id':
pass
else:
entity_output[key] = str(raw_entity_outout[key])
if entity_output.get('superclass'):
entity_output['superclass'] = self.ilx_base_url + entity_output['superclass']
entity_output['ilx'] = self.ilx_base_url + raw_entity_outout['ilx']
return entity_output
def get_annotation_via_tid(self, tid: str) -> dict:
""" Gets annotation via anchored entity id """
url = self.base_url + 'term/get-annotations/{tid}?key={api_key}'.format(
tid = tid,
api_key = self.api_key,
)
return self.get(url)
def add_annotation(
self,
term_ilx_id: str,
annotation_type_ilx_id: str,
annotation_value: str) -> dict:
""" Adding an annotation value to a prexisting entity
An annotation exists as 3 different parts:
1. entity with type term, cde, fde, or pde
2. entity with type annotation
3. string value of the annotation
Example:
annotation = {
'term_ilx_id': 'ilx_0101431', # brain ILX ID
'annotation_type_ilx_id': 'ilx_0381360', # hasDbXref ILX ID
'annotation_value': 'http://neurolex.org/wiki/birnlex_796',
}
"""
url = self.base_url + 'term/add-annotation'
term_data = self.get_entity(term_ilx_id)
if not term_data['id']:
exit(
'term_ilx_id: ' + term_ilx_id + ' does not exist'
)
anno_data = self.get_entity(annotation_type_ilx_id)
if not anno_data['id']:
exit(
'annotation_type_ilx_id: ' + annotation_type_ilx_id +
' does not exist'
)
data = {
'tid': term_data['id'],
'annotation_tid': anno_data['id'],
'value': annotation_value,
'term_version': term_data['version'],
'annotation_term_version': anno_data['version'],
'orig_uid': self.user_id, # BUG: php lacks orig_uid update
}
output = self.post(
url = url,
data = data,
)
### If already exists, we return the actual annotation properly ###
if output.get('errormsg'):
if 'already exists' in output['errormsg'].lower():
term_annotations = self.get_annotation_via_tid(term_data['id'])
for term_annotation in term_annotations:
if str(term_annotation['annotation_tid']) == str(anno_data['id']):
if term_annotation['value'] == data['value']:
print(
'Annotation: [' + term_data['label'] + ' -> ' + anno_data['label'] +
' -> ' + data['value'] + '], already exists.'
)
return term_annotation
exit(output)
exit(output)
return output
def delete_annotation(
self,
term_ilx_id: str,
annotation_type_ilx_id: str,
annotation_value: str) -> dict:
""" If annotation doesnt exist, add it
"""
term_data = self.get_entity(term_ilx_id)
if not term_data['id']:
exit(
'term_ilx_id: ' + term_ilx_id + ' does not exist'
)
anno_data = self.get_entity(annotation_type_ilx_id)
if not anno_data['id']:
exit(
'annotation_type_ilx_id: ' + annotation_type_ilx_id +
' does not exist'
)
entity_annotations = self.get_annotation_via_tid(term_data['id'])
annotation_id = ''
for annotation in entity_annotations:
if str(annotation['tid']) == str(term_data['id']):
if str(annotation['annotation_tid']) == str(anno_data['id']):
if str(annotation['value']) == str(annotation_value):
annotation_id = annotation['id']
break
if not annotation_id:
print('''WARNING: Annotation you wanted to delete does not exist ''')
return None
url = self.base_url + 'term/edit-annotation/{annotation_id}'.format(
annotation_id = annotation_id
)
data = {
'tid': ' ', # for delete
'annotation_tid': ' ', # for delete
'value': ' ', # for delete
'term_version': ' ',
'annotation_term_version': ' ',
}
output = self.post(
url = url,
data = data,
)
# check output
return output
def get_relationship_via_tid(self, tid: str) -> dict:
url = self.base_url + 'term/get-relationships/{tid}?key={api_key}'.format(
tid = tid,
api_key = self.api_key,
)
return self.get(url)
def add_relationship(
self,
entity1_ilx: str,
relationship_ilx: str,
entity2_ilx: str) -> dict:
""" Adds relationship connection in Interlex
A relationship exists as 3 different parts:
1. entity with type term, cde, fde, or pde
2. entity with type relationship that connects entity1 to entity2
-> Has its' own meta data, so no value needed
3. entity with type term, cde, fde, or pde
"""
url = self.base_url + 'term/add-relationship'
entity1_data = self.get_entity(entity1_ilx)
if not entity1_data['id']:
exit(
'entity1_ilx: ' + entity1_ilx + ' does not exist'
)
relationship_data = self.get_entity(relationship_ilx)
if not relationship_data['id']:
exit(
'relationship_ilx: ' + relationship_ilx + ' does not exist'
)
entity2_data = self.get_entity(entity2_ilx)
if not entity2_data['id']:
exit(
'entity2_ilx: ' + entity2_ilx + ' does not exist'
)
data = {
'term1_id': entity1_data['id'],
'relationship_tid': relationship_data['id'],
'term2_id': entity2_data['id'],
'term1_version': entity1_data['version'],
'term2_version': entity2_data['version'],
'relationship_term_version': relationship_data['version'],
'orig_uid': self.user_id, # BUG: php lacks orig_uid update
}
output = self.post(
url = url,
data = data,
)
### If already exists, we return the actual relationship properly ###
if output.get('errormsg'):
if 'already exists' in output['errormsg'].lower():
term_relationships = self.get_relationship_via_tid(entity1_data['id'])
for term_relationship in term_relationships:
if str(term_relationship['term2_id']) == str(entity2_data['id']):
if term_relationship['relationship_tid'] == relationship_data['id']:
print(
'relationship: [' + entity1_data['label'] + ' -> ' +
relationship_data['label'] + ' -> ' + entity2_data['label'] +
'], already exists.'
)
return term_relationship
exit(output)
exit(output)
return output
def delete_relationship(
self,
entity1_ilx: str,
relationship_ilx: str,
entity2_ilx: str) -> dict:
""" Adds relationship connection in Interlex
A relationship exists as 3 different parts:
1. entity with type term, cde, fde, or pde
2. entity with type relationship that connects entity1 to entity2
-> Has its' own meta data, so no value needed
3. entity with type term, cde, fde, or pde
"""
entity1_data = self.get_entity(entity1_ilx)
if not entity1_data['id']:
exit(
'entity1_ilx: ' + entity1_data + ' does not exist'
)
relationship_data = self.get_entity(relationship_ilx)
if not relationship_data['id']:
exit(
'relationship_ilx: ' + relationship_ilx + ' does not exist'
)
entity2_data = self.get_entity(entity2_ilx)
if not entity2_data['id']:
exit(
'entity2_ilx: ' + entity2_data + ' does not exist'
)
data = {
'term1_id': ' ', #entity1_data['id'],
'relationship_tid': ' ', #relationship_data['id'],
'term2_id': ' ',#entity2_data['id'],
'term1_version': entity1_data['version'],
'term2_version': entity2_data['version'],
'relationship_term_version': relationship_data['version'],
'orig_uid': self.user_id, # BUG: php lacks orig_uid update
}
entity_relationships = self.get_relationship_via_tid(entity1_data['id'])
# TODO: parse through entity_relationships to see if we have a match; else print warning and return None
relationship_id = None
for relationship in entity_relationships:
if str(relationship['term1_id']) == str(entity1_data['id']):
if str(relationship['term2_id']) == str(entity2_data['id']):
if str(relationship['relationship_tid']) == str(relationship_data['id']):
relationship_id = relationship['id']
break
if not relationship_id:
print('''WARNING: Annotation you wanted to delete does not exist ''')
return None
url = self.base_url + 'term/edit-relationship/{id}'.format(id=relationship_id)
output = self.post(
url = url,
data = data,
)
return output
|
tgbugs/ontquery | ontquery/plugins/interlex_client.py | InterLexClient.post | python | def post(self, url: str, data: List[dict]) -> List[dict]:
data.update({
'key': self.api_key,
})
response = requests.post(
url,
data = json.dumps(data),
headers = {'Content-type': 'application/json'},
auth = ('scicrunch', 'perl22(query)') # for test2.scicrunch.org
)
output = self.process_response(response)
return output | Gives data to database | train | https://github.com/tgbugs/ontquery/blob/bcf4863cb2bf221afe2b093c5dc7da1377300041/ontquery/plugins/interlex_client.py#L91-L103 | [
"def process_response(self, response: requests.models.Response) -> dict:\n \"\"\" Checks for correct data response and status codes \"\"\"\n try:\n output = response.json()\n except json.JSONDecodeError: # Server is having a bad day and crashed.\n raise self.BadResponseError(\n 'Json not returned with status code [' + str(response.status_code) + ']')\n\n if response.status_code == 400:\n return output\n\n if response.status_code not in [200, 201]: # Safety catch.\n raise self.BadResponseError(\n str(output) + ': with status code [' + str(response.status_code) +\n '] and params:' + str(output))\n\n return output['data']\n"
] | class InterLexClient:
""" Connects to SciCrunch via its' api endpoints
Purpose is to allow external curators to add entities and annotations to those entities.
Functions To Use:
add_entity
add_annotation
Notes On Time Complexity:
Function add_entity, if added an entity successfully, will hit a least 5 endpoints. This
may cause it to take a few seconds to load each entity into SciCrunch. Function
add_annotation is a little more forgiving with it only hitting 3 minimum.
"""
class Error(Exception): pass
class SuperClassDoesNotExistError(Error):
""" The superclass listed does not exist! """
class EntityDoesNotExistError(Error):
""" The entity listed does not exist! """
class BadResponseError(Error): pass
class NoLabelError(Error): pass
class NoTypeError(Error): pass
class MissingKeyError(Error): pass
class IncorrectKeyError(Error): pass
class IncorrectAPIKeyError(Error): pass
default_base_url = 'https://scicrunch.org/api/1/'
ilx_base_url = 'http://uri.interlex.org/base/'
def __init__(self, api_key: str, base_url: str = default_base_url):
self.api_key = api_key
self.base_url = base_url
user_info_url = self.default_base_url + 'user/info?key=' + self.api_key
self.check_api_key(user_info_url)
self.user_id = str(self.get(user_info_url)['id'])
def check_api_key(self, url):
response = requests.get(
url,
headers = {'Content-type': 'application/json'},
auth = ('scicrunch', 'perl22(query)') # for test2.scicrunch.org
)
if response.status_code not in [200, 201]: # Safety catch.
raise self.IncorrectAPIKeyError('api_key given is incorrect.')
def process_response(self, response: requests.models.Response) -> dict:
""" Checks for correct data response and status codes """
try:
output = response.json()
except json.JSONDecodeError: # Server is having a bad day and crashed.
raise self.BadResponseError(
'Json not returned with status code [' + str(response.status_code) + ']')
if response.status_code == 400:
return output
if response.status_code not in [200, 201]: # Safety catch.
raise self.BadResponseError(
str(output) + ': with status code [' + str(response.status_code) +
'] and params:' + str(output))
return output['data']
def get(self, url: str) -> List[dict]:
""" Requests data from database """
response = requests.get(
url,
headers = {'Content-type': 'application/json'},
auth = ('scicrunch', 'perl22(query)') # for test2.scicrunch.org
)
output = self.process_response(response)
return output
def fix_ilx(self, ilx_id: str) -> str:
""" Database only excepts lower case and underscore version of ID """
# FIXME probably .rsplit('/', 1) is the more correct version of this
# and because this is nominally a 'private' interface these should be
ilx_id = ilx_id.replace('http://uri.interlex.org/base/', '')
if ilx_id[:4] not in ['TMP:', 'tmp_', 'ILX:', 'ilx_']:
raise ValueError(
'Need to provide ilx ID with format ilx_# or ILX:# for given ID ' + ilx_id)
return ilx_id.replace('ILX:', 'ilx_').replace('TMP:', 'tmp_')
def process_superclass(self, entity: List[dict]) -> List[dict]:
""" Replaces ILX ID with superclass ID """
superclass = entity.pop('superclass')
label = entity['label']
if not superclass.get('ilx_id'):
raise self.SuperClassDoesNotExistError(
f'Superclass not given an interlex ID for label: {label}')
superclass_data = self.get_entity(superclass['ilx_id'])
if not superclass_data['id']:
raise self.SuperClassDoesNotExistError(
'Superclass ILX ID: ' + superclass['ilx_id'] + ' does not exist in SciCrunch')
# BUG: only excepts superclass_tid
entity['superclasses'] = [{'superclass_tid': superclass_data['id']}]
return entity
def process_synonyms(self, entity: List[dict]) -> List[dict]:
""" Making sure key/value is in proper format for synonyms in entity """
label = entity['label']
for synonym in entity['synonyms']:
# these are internal errors and users should never see them
if 'literal' not in synonym:
raise ValueError(f'Synonym not given a literal for label: {label}')
elif len(synonym) > 1:
raise ValueError(f'Too many keys in synonym for label: {label}')
return entity
def process_existing_ids(self, entity: List[dict]) -> List[dict]:
""" Making sure key/value is in proper format for existing_ids in entity """
label = entity['label']
existing_ids = entity['existing_ids']
for existing_id in existing_ids:
if 'curie' not in existing_id or 'iri' not in existing_id:
raise ValueError(
f'Missing needing key(s) in existing_ids for label: {label}')
elif len(existing_id) > 2:
raise ValueError(
f'Extra keys not recognized in existing_ids for label: {label}')
return entity
def crude_search_scicrunch_via_label(self, label:str) -> dict:
""" Server returns anything that is simlar in any catagory """
url = self.base_url + 'term/search/{term}?key={api_key}'.format(
term = label,
api_key = self.api_key,
)
return self.get(url)
def check_scicrunch_for_label(self, label: str) -> dict:
    """ Returns the full record of the entity with this label that YOU created, else {}.

    There can be multiples of the same label in InterLex, but there should only
    be one label with your user id. Therefore a label can be re-created when it
    already exists, but not when you were the one who created it.

    :param label: entity label, matched case- and whitespace-insensitively
    :returns: complete entity data dict if you already created it, otherwise {}
    """
    list_of_crude_matches = self.crude_search_scicrunch_via_label(label)
    for crude_match in list_of_crude_matches:
        # If labels match
        if crude_match['label'].lower().strip() == label.lower().strip():
            complete_data_of_crude_match = self.get_entity(crude_match['ilx'])
            # If label was created by you (ids compared as strings; server types vary)
            if str(self.user_id) == str(complete_data_of_crude_match['uid']):
                return complete_data_of_crude_match  # You created the entity already
    # No label AND user id match
    return {}
def get_entity(self, ilx_id: str) -> dict:
    """ Fetches full entity metadata (minus annotations/relationships) by ILX ID. """
    identifier = self.fix_ilx(ilx_id)
    url = '{base}ilx/search/identifier/{identifier}?key={key}'.format(
        base=self.base_url, identifier=identifier, key=self.api_key)
    return self.get(url)
def add_entity(
        self,
        label: str,
        type: str,
        definition: str = None,
        comment: str = None,
        superclass: str = None,
        synonyms: list = None) -> dict:
    """ Adds an entity to InterLex and returns a simplified record of what was stored.

    :param label: name of the entity
    :param type: one of term, cde, fde, pde, annotation, relationship
    :param definition: entity definition
    :param comment: footnote about the data or its interpretation
    :param superclass: ILX ID of the parent entity
    :param synonyms: list of synonym strings
    :returns: dict with keys iri, curie, ilx plus whichever inputs were given
    """
    # Snapshot of the truthy arguments; echoed back into the output below.
    # NOTE: must stay the first statement so locals() only sees the parameters.
    template_entity_input = {k:v for k, v in locals().items() if k != 'self' and v}
    if template_entity_input.get('superclass'):
        template_entity_input['superclass'] = self.fix_ilx(template_entity_input['superclass'])
    if not label:
        raise self.NoLabelError('Entity needs a label')
    if not type:
        raise self.NoTypeError('Entity needs a type')
    # Build the raw payload in the shape add_raw_entity expects
    entity_input = {
        'label': label,
        'type': type,
    }
    if definition:
        entity_input['definition'] = definition
    if comment:
        entity_input['comment'] = comment
    if superclass:
        entity_input['superclass'] = {'ilx_id':self.fix_ilx(superclass)}
    if synonyms:
        entity_input['synonyms'] = [{'literal': syn} for syn in synonyms]
    raw_entity_outout = self.add_raw_entity(entity_input)
    # Sanity check -> output same as input, but filled with response data
    entity_output = {}
    # Pick the canonical ilx_ iri/curie pair out of the server's existing_ids
    ics = [(e['iri'], e['curie'])
           for e in raw_entity_outout['existing_ids']]
    entity_output['iri'], entity_output['curie'] = sorted((i, c)
                                                          for i, c in ics
                                                          if 'ilx_' in i)[0]
    ### FOR NEW BETA. Old can have 'ilx_' in the ids ###
    if 'tmp' in raw_entity_outout['ilx']:
        _id = raw_entity_outout['ilx'].split('_')[-1]
        entity_output['iri'] = 'http://uri.interlex.org/base/tmp_' + _id
        entity_output['curie'] = 'TMP:' + _id
    # Echo each supplied field back using the server's stored values
    for key, value in template_entity_input.items():
        if key == 'superclass':
            entity_output[key] = raw_entity_outout['superclasses'][0]['ilx']
        elif key == 'synonyms':
            entity_output[key] = [syn['literal']
                                  for syn in raw_entity_outout['synonyms']]
        else:
            entity_output[key] = str(raw_entity_outout[key])
    # skip this for now, I check it downstream be cause I'm paranoid, but in this client it is
    # safe to assume that the value given will be the value returned if there is a return at all
    # it also isn't that they match exactly, because some new values (e.g. iri and curie) are expected
    #if entity_output != template_entity_input:
        # DEBUG: helps see what's wrong; might want to make a clean version of this
        # for key, value in template_entity_input.items():
        #     if template_entity_input[key] != entity_output[key]:
        #         print(template_entity_input[key], entity_output[key])
        #raise self.BadResponseError('The server did not return proper data!')
    # Expand fragments into full IRIs for the caller
    if entity_output.get('superclass'):
        entity_output['superclass'] = self.ilx_base_url + entity_output['superclass']
    entity_output['ilx'] = self.ilx_base_url + raw_entity_outout['ilx']
    return entity_output
def add_raw_entity(self, entity: dict) -> dict:
    """ Adds entity if it does not already exist under your user ID.

    Need to provide a list of dictionaries that have at least the key/values
    for label and type. If given a key, the values provided must be in the
    format shown in order for the server to accept them. You can input
    multiple synonyms, or existing_ids.

    Entity type can be any of the following: term, pde, fde, cde, annotation, or relationship

    Options Template:
        entity = {
            'label': '',
            'type': '',
            'definition': '',
            'comment': '',
            'superclass': {
                'ilx_id': ''
            },
            'synonyms': [
                {
                    'literal': ''
                },
            ],
            'existing_ids': [
                {
                    'iri': '',
                    'curie': '',
                },
            ],
        }

    Minimum Needed:
        entity = {
            'label': '',
            'type': '',
        }

    Example:
        entity = {
            'label': 'brain',
            'type': 'pde',
            'definition': 'Part of the central nervous system',
            'comment': 'Cannot live without it',
            'superclass': {
                'ilx_id': 'ilx_0108124', # ILX ID for Organ
            },
            'synonyms': [
                {
                    'literal': 'Encephalon'
                },
                {
                    'literal': 'Cerebro'
                },
            ],
            'existing_ids': [
                {
                    'iri': 'http://uri.neuinfo.org/nif/nifstd/birnlex_796',
                    'curie': 'BIRNLEX:796',
                },
            ],
        }
    """
    needed_in_entity = set([
        'label',
        'type',
    ])
    options_in_entity = set([
        'label',
        'type',
        'definition',
        'comment',
        'superclass',
        'synonyms',
        'existing_ids'
    ])
    prime_entity_url = self.base_url + 'ilx/add'
    add_entity_url = self.base_url + 'term/add'
    ### Checking if key/value format is correct ###
    # Seeing if you are missing a needed key
    if (set(entity) & needed_in_entity) != needed_in_entity:
        raise self.MissingKeyError(
            'You need key(s): '+ str(needed_in_entity - set(entity)))
    # Seeing if you have other options not included in the description
    elif (set(entity) | options_in_entity) != options_in_entity:
        raise self.IncorrectKeyError(
            'Unexpected key(s): ' + str(set(entity) - options_in_entity))
    entity['type'] = entity['type'].lower() # BUG: server only takes lowercase
    if entity['type'] not in ['term', 'relationship', 'annotation', 'cde', 'fde', 'pde']:
        raise TypeError(
            'Entity should be one of the following: ' +
            'term, relationship, annotation, cde, fde, pde')
    # Normalize nested structures into their server-side representations
    if entity.get('superclass'):
        entity = self.process_superclass(entity)
    if entity.get('synonyms'):
        entity = self.process_synonyms(entity)
    if entity.get('existing_ids'):
        entity = self.process_existing_ids(entity)
    entity['uid'] = self.user_id # BUG: php lacks uid update
    ### Adding entity to SciCrunch ###
    entity['term'] = entity.pop('label') # ilx/add nuance
    ilx_data = self.post(
        url = prime_entity_url,
        data = entity.copy(),
    ) # requesting spot in server for entity
    if ilx_data.get('ilx'):
        ilx_id = ilx_data['ilx']
    else:
        ilx_id = ilx_data['fragment'] # beta.scicrunch.org
    entity['label'] = entity.pop('term') # term/add nuance
    entity['ilx'] = ilx_id # need entity ilx_id to place entity in db
    output = self.post(
        url = add_entity_url,
        data = entity.copy(),
    ) # data represented in SciCrunch interface
    ### Checking if label already exists ###
    if output.get('errormsg'):
        if 'already exists' in output['errormsg'].lower():
            prexisting_data = self.check_scicrunch_for_label(entity['label'])
            if prexisting_data:
                print(
                    'You already added entity', entity['label'],
                    'with ILX ID:', prexisting_data['ilx'])
                return prexisting_data
            # BUGFIX: the exceptions below were constructed but never raised,
            # silently swallowing server errors and then failing later on
            # output['ilx'] with an unrelated KeyError.
            raise self.Error(output) # FIXME what is the correct error here?
        raise self.Error(output) # FIXME what is the correct error here?
    # BUG: server output incomplete compared to search via ilx ids
    output = self.get_entity(output['ilx'])
    return output
def update_entity(
        self,
        ilx_id: str,
        label: str = None,
        type: str = None,
        definition: str = None,
        comment: str = None,
        superclass: str = None,
        synonyms: list = None) -> dict:
    """ Updates pre-existing entity as long as the api_key is from the account that created it

    Args:
        ilx_id: ILX ID of the entity to update
        label: name of entity
        type: entities type
            Can be any of the following: term, cde, fde, pde, annotation, relationship
        definition: entities definition
        comment: a foot note regarding either the interpretation of the data or the data itself
        superclass: entity is a sub-part of this entity
            Example: Organ is a superclass to Brain
        synonyms: entity synonyms

    Returns:
        Server response that is a nested dictionary format
    """
    # Snapshot of the arguments (Nones included) used to shape the output below.
    # NOTE: must stay the first statement so locals() only sees the parameters.
    template_entity_input = {k:v for k, v in locals().copy().items() if k != 'self'}
    if template_entity_input.get('superclass'):
        template_entity_input['superclass'] = self.fix_ilx(template_entity_input['superclass'])
    existing_entity = self.get_entity(ilx_id=ilx_id)
    if not existing_entity['id']: # TODO: Need to make a proper ilx_id check error
        raise self.EntityDoesNotExistError(
            f'ilx_id provided {ilx_id} does not exist')
    update_url = self.base_url + 'term/edit/{id}'.format(id=existing_entity['id'])
    # Overlay only the fields the caller supplied
    if label:
        existing_entity['label'] = label
    if type:
        existing_entity['type'] = type
    if definition:
        existing_entity['definition'] = definition
    if comment:
        existing_entity['comment'] = comment
    if superclass:
        existing_entity['superclass'] = {'ilx_id': superclass}
        existing_entity = self.process_superclass(existing_entity)
    # If a match use old data, else append new synonym
    if synonyms:
        if existing_entity['synonyms']:
            new_existing_synonyms = []
            existing_synonyms = {syn['literal'].lower():syn for syn in existing_entity['synonyms']}
            for synonym in synonyms:
                existing_synonym = existing_synonyms.get(synonym.lower())
                if not existing_synonym:
                    new_existing_synonyms.append({'literal': synonym})
                else:
                    new_existing_synonyms.append(existing_synonym)
            existing_entity['synonyms'] = new_existing_synonyms
    # Just in case I need this...
    # if synonyms_to_delete:
    #     if existing_entity['synonyms']:
    #         remaining_existing_synonyms = []
    #         existing_synonyms = {syn['literal'].lower():syn for syn in existing_entity['synonyms']}
    #         for synonym in synonyms:
    #             if existing_synonyms.get(synonym.lower()):
    #                 existing_synonyms.pop(synonym.lower())
    #             else:
    #                 print('WARNING: synonym you wanted to delete', synonym, 'does not exist')
    #         existing_entity['synonyms'] = list(existing_synonyms.values())
    response = self.post(
        url = update_url,
        data = existing_entity,
    )
    # BUG: server response is bad and needs to actually search again to get proper format
    raw_entity_outout = self.get_entity(response['ilx'])
    entity_output = {}
    # Pick the canonical ilx_ iri/curie pair out of the server's existing_ids
    ics = [(e['iri'], e['curie'])
           for e in raw_entity_outout['existing_ids']]
    entity_output['iri'], entity_output['curie'] = sorted((i, c)
                                                          for i, c in ics
                                                          if 'ilx_' in i)[0]
    ### FOR NEW BETA. Old can have 'ilx_' in the ids ###
    if 'tmp' in raw_entity_outout['ilx']:
        _id = raw_entity_outout['ilx'].split('_')[-1]
        entity_output['iri'] = 'http://uri.interlex.org/base/tmp_' + _id
        entity_output['curie'] = 'TMP:' + _id
    # BUGFIX: removed leftover debug print(template_entity_input)
    for key, value in template_entity_input.items():
        if key == 'superclass':
            if raw_entity_outout.get('superclasses'):
                entity_output[key] = raw_entity_outout['superclasses'][0]['ilx']
        elif key == 'synonyms':
            entity_output[key] = [syn['literal']
                                  for syn in raw_entity_outout['synonyms']]
        elif key == 'ilx_id':
            pass
        else:
            entity_output[key] = str(raw_entity_outout[key])
    # Expand fragments into full IRIs for the caller
    if entity_output.get('superclass'):
        entity_output['superclass'] = self.ilx_base_url + entity_output['superclass']
    entity_output['ilx'] = self.ilx_base_url + raw_entity_outout['ilx']
    return entity_output
def get_annotation_via_tid(self, tid: str) -> dict:
    """ Lists annotations anchored to the entity with server id `tid`. """
    url = '{base}term/get-annotations/{tid}?key={key}'.format(
        base=self.base_url, tid=tid, key=self.api_key)
    return self.get(url)
def add_annotation(
        self,
        term_ilx_id: str,
        annotation_type_ilx_id: str,
        annotation_value: str) -> dict:
    """ Adding an annotation value to a prexisting entity

    An annotation exists as 3 different parts:
        1. entity with type term, cde, fde, or pde
        2. entity with type annotation
        3. string value of the annotation

    :param term_ilx_id: ILX ID of the entity being annotated
    :param annotation_type_ilx_id: ILX ID of the annotation-type entity
    :param annotation_value: free-text value of the annotation
    :returns: server record of the annotation; the existing record if it was
        already present

    Example:
        annotation = {
            'term_ilx_id': 'ilx_0101431', # brain ILX ID
            'annotation_type_ilx_id': 'ilx_0381360', # hasDbXref ILX ID
            'annotation_value': 'http://neurolex.org/wiki/birnlex_796',
        }
    """
    url = self.base_url + 'term/add-annotation'
    term_data = self.get_entity(term_ilx_id)
    # NOTE(review): exit() raises SystemExit on bad input — consider raising
    # EntityDoesNotExistError instead; confirm no caller relies on SystemExit.
    if not term_data['id']:
        exit(
            'term_ilx_id: ' + term_ilx_id + ' does not exist'
        )
    anno_data = self.get_entity(annotation_type_ilx_id)
    if not anno_data['id']:
        exit(
            'annotation_type_ilx_id: ' + annotation_type_ilx_id +
            ' does not exist'
        )
    data = {
        'tid': term_data['id'],
        'annotation_tid': anno_data['id'],
        'value': annotation_value,
        'term_version': term_data['version'],
        'annotation_term_version': anno_data['version'],
        'orig_uid': self.user_id, # BUG: php lacks orig_uid update
    }
    output = self.post(
        url = url,
        data = data,
    )
    ### If already exists, we return the actual annotation properly ###
    if output.get('errormsg'):
        if 'already exists' in output['errormsg'].lower():
            term_annotations = self.get_annotation_via_tid(term_data['id'])
            # Find the matching annotation row and hand it back instead of failing
            for term_annotation in term_annotations:
                if str(term_annotation['annotation_tid']) == str(anno_data['id']):
                    if term_annotation['value'] == data['value']:
                        print(
                            'Annotation: [' + term_data['label'] + ' -> ' + anno_data['label'] +
                            ' -> ' + data['value'] + '], already exists.'
                        )
                        return term_annotation
            # No matching row found despite the duplicate error -> bail out
            exit(output)
        exit(output)
    return output
def delete_annotation(
        self,
        term_ilx_id: str,
        annotation_type_ilx_id: str,
        annotation_value: str) -> dict:
    """ Deletes an existing annotation from an entity.

    :param term_ilx_id: ILX ID of the annotated entity
    :param annotation_type_ilx_id: ILX ID of the annotation-type entity
    :param annotation_value: exact value of the annotation to remove
    :returns: server response dict, or None if no matching annotation exists
    """
    term_data = self.get_entity(term_ilx_id)
    # NOTE(review): exit() raises SystemExit on bad input — consider raising
    # EntityDoesNotExistError instead; confirm no caller relies on SystemExit.
    if not term_data['id']:
        exit(
            'term_ilx_id: ' + term_ilx_id + ' does not exist'
        )
    anno_data = self.get_entity(annotation_type_ilx_id)
    if not anno_data['id']:
        exit(
            'annotation_type_ilx_id: ' + annotation_type_ilx_id +
            ' does not exist'
        )
    entity_annotations = self.get_annotation_via_tid(term_data['id'])
    # Locate the exact annotation row: entity, type, and value must all match
    annotation_id = ''
    for annotation in entity_annotations:
        if str(annotation['tid']) == str(term_data['id']):
            if str(annotation['annotation_tid']) == str(anno_data['id']):
                if str(annotation['value']) == str(annotation_value):
                    annotation_id = annotation['id']
                    break
    if not annotation_id:
        print('''WARNING: Annotation you wanted to delete does not exist ''')
        return None
    url = self.base_url + 'term/edit-annotation/{annotation_id}'.format(
        annotation_id = annotation_id
    )
    # Blanked fields signal deletion to the edit-annotation endpoint
    data = {
        'tid': ' ', # for delete
        'annotation_tid': ' ', # for delete
        'value': ' ', # for delete
        'term_version': ' ',
        'annotation_term_version': ' ',
    }
    output = self.post(
        url = url,
        data = data,
    )
    # check output
    return output
def get_relationship_via_tid(self, tid: str) -> dict:
    """ Lists relationships anchored to the entity with server id `tid`. """
    url = '{base}term/get-relationships/{tid}?key={key}'.format(
        base=self.base_url, tid=tid, key=self.api_key)
    return self.get(url)
def add_relationship(
        self,
        entity1_ilx: str,
        relationship_ilx: str,
        entity2_ilx: str) -> dict:
    """ Adds relationship connection in Interlex

    A relationship exists as 3 different parts:
        1. entity with type term, cde, fde, or pde
        2. entity with type relationship that connects entity1 to entity2
            -> Has its' own meta data, so no value needed
        3. entity with type term, cde, fde, or pde

    :param entity1_ilx: ILX ID of the first endpoint entity
    :param relationship_ilx: ILX ID of the relationship-type entity
    :param entity2_ilx: ILX ID of the second endpoint entity
    :returns: server record of the relationship; the existing record if it was
        already present
    """
    url = self.base_url + 'term/add-relationship'
    entity1_data = self.get_entity(entity1_ilx)
    # NOTE(review): exit() raises SystemExit on bad input — consider raising
    # EntityDoesNotExistError instead; confirm no caller relies on SystemExit.
    if not entity1_data['id']:
        exit(
            'entity1_ilx: ' + entity1_ilx + ' does not exist'
        )
    relationship_data = self.get_entity(relationship_ilx)
    if not relationship_data['id']:
        exit(
            'relationship_ilx: ' + relationship_ilx + ' does not exist'
        )
    entity2_data = self.get_entity(entity2_ilx)
    if not entity2_data['id']:
        exit(
            'entity2_ilx: ' + entity2_ilx + ' does not exist'
        )
    data = {
        'term1_id': entity1_data['id'],
        'relationship_tid': relationship_data['id'],
        'term2_id': entity2_data['id'],
        'term1_version': entity1_data['version'],
        'term2_version': entity2_data['version'],
        'relationship_term_version': relationship_data['version'],
        'orig_uid': self.user_id, # BUG: php lacks orig_uid update
    }
    output = self.post(
        url = url,
        data = data,
    )
    ### If already exists, we return the actual relationship properly ###
    if output.get('errormsg'):
        if 'already exists' in output['errormsg'].lower():
            term_relationships = self.get_relationship_via_tid(entity1_data['id'])
            # Find the matching relationship row and hand it back instead of failing
            for term_relationship in term_relationships:
                if str(term_relationship['term2_id']) == str(entity2_data['id']):
                    if term_relationship['relationship_tid'] == relationship_data['id']:
                        print(
                            'relationship: [' + entity1_data['label'] + ' -> ' +
                            relationship_data['label'] + ' -> ' + entity2_data['label'] +
                            '], already exists.'
                        )
                        return term_relationship
            # No matching row found despite the duplicate error -> bail out
            exit(output)
        exit(output)
    return output
def delete_relationship(
        self,
        entity1_ilx: str,
        relationship_ilx: str,
        entity2_ilx: str) -> dict:
    """ Deletes a relationship connection in Interlex

    A relationship exists as 3 different parts:
        1. entity with type term, cde, fde, or pde
        2. entity with type relationship that connects entity1 to entity2
            -> Has its' own meta data, so no value needed
        3. entity with type term, cde, fde, or pde

    :param entity1_ilx: ILX ID of the first endpoint entity
    :param relationship_ilx: ILX ID of the relationship-type entity
    :param entity2_ilx: ILX ID of the second endpoint entity
    :returns: server response dict, or None if no matching relationship exists
    """
    entity1_data = self.get_entity(entity1_ilx)
    if not entity1_data['id']:
        # BUGFIX: was concatenating the dict entity1_data into the message,
        # which raised TypeError instead of reporting the missing ID
        exit(
            'entity1_ilx: ' + entity1_ilx + ' does not exist'
        )
    relationship_data = self.get_entity(relationship_ilx)
    if not relationship_data['id']:
        exit(
            'relationship_ilx: ' + relationship_ilx + ' does not exist'
        )
    entity2_data = self.get_entity(entity2_ilx)
    if not entity2_data['id']:
        # BUGFIX: same dict-concatenation bug as entity1 above
        exit(
            'entity2_ilx: ' + entity2_ilx + ' does not exist'
        )
    # Blanked id fields signal deletion to the edit-relationship endpoint
    data = {
        'term1_id': ' ', #entity1_data['id'],
        'relationship_tid': ' ', #relationship_data['id'],
        'term2_id': ' ',#entity2_data['id'],
        'term1_version': entity1_data['version'],
        'term2_version': entity2_data['version'],
        'relationship_term_version': relationship_data['version'],
        'orig_uid': self.user_id, # BUG: php lacks orig_uid update
    }
    entity_relationships = self.get_relationship_via_tid(entity1_data['id'])
    # Locate the exact relationship row: both endpoints and type must match
    relationship_id = None
    for relationship in entity_relationships:
        if str(relationship['term1_id']) == str(entity1_data['id']):
            if str(relationship['term2_id']) == str(entity2_data['id']):
                if str(relationship['relationship_tid']) == str(relationship_data['id']):
                    relationship_id = relationship['id']
                    break
    if not relationship_id:
        # BUGFIX: message said "Annotation"; this method deletes relationships
        print('''WARNING: Relationship you wanted to delete does not exist ''')
        return None
    url = self.base_url + 'term/edit-relationship/{id}'.format(id=relationship_id)
    output = self.post(
        url = url,
        data = data,
    )
    return output
|
tgbugs/ontquery | ontquery/plugins/interlex_client.py | InterLexClient.process_superclass | python | def process_superclass(self, entity: List[dict]) -> List[dict]:
superclass = entity.pop('superclass')
label = entity['label']
if not superclass.get('ilx_id'):
raise self.SuperClassDoesNotExistError(
f'Superclass not given an interlex ID for label: {label}')
superclass_data = self.get_entity(superclass['ilx_id'])
if not superclass_data['id']:
raise self.SuperClassDoesNotExistError(
'Superclass ILX ID: ' + superclass['ilx_id'] + ' does not exist in SciCrunch')
# BUG: only excepts superclass_tid
entity['superclasses'] = [{'superclass_tid': superclass_data['id']}]
return entity | Replaces ILX ID with superclass ID | train | https://github.com/tgbugs/ontquery/blob/bcf4863cb2bf221afe2b093c5dc7da1377300041/ontquery/plugins/interlex_client.py#L115-L128 | [
"def get_entity(self, ilx_id: str) -> dict:\n \"\"\" Gets full meta data (expect their annotations and relationships) from is ILX ID \"\"\"\n ilx_id = self.fix_ilx(ilx_id)\n url = self.base_url + \"ilx/search/identifier/{identifier}?key={api_key}\".format(\n identifier = ilx_id,\n api_key = self.api_key,\n )\n return self.get(url)\n"
] | class InterLexClient:
""" Connects to SciCrunch via its' api endpoints
Purpose is to allow external curators to add entities and annotations to those entities.
Functions To Use:
add_entity
add_annotation
Notes On Time Complexity:
Function add_entity, if added an entity successfully, will hit a least 5 endpoints. This
may cause it to take a few seconds to load each entity into SciCrunch. Function
add_annotation is a little more forgiving with it only hitting 3 minimum.
"""
# Root of the client's exception hierarchy; catch this for any client error
class Error(Exception): pass

class SuperClassDoesNotExistError(Error):
    """ The superclass listed does not exist! """

class EntityDoesNotExistError(Error):
    """ The entity listed does not exist! """

# Server reply was malformed or carried an unexpected status code
class BadResponseError(Error): pass

# Entity creation was attempted without a label
class NoLabelError(Error): pass

# Entity creation was attempted without a type
class NoTypeError(Error): pass

# A required key was absent from an entity dict
class MissingKeyError(Error): pass

# An entity dict carried a key the server does not recognize
class IncorrectKeyError(Error): pass

# The api_key failed the startup check against the server
class IncorrectAPIKeyError(Error): pass

# Production API root; overridable per-instance via the base_url __init__ argument
default_base_url = 'https://scicrunch.org/api/1/'
# Prefix used to expand ilx fragments into full InterLex IRIs
ilx_base_url = 'http://uri.interlex.org/base/'
def __init__(self, api_key: str, base_url: str = default_base_url):
    """ Stores credentials and verifies the API key against the server.

    :param api_key: SciCrunch/InterLex API key
    :param base_url: API root used for subsequent calls; defaults to production
    :raises IncorrectAPIKeyError: if the server rejects the key
    """
    self.api_key = api_key
    self.base_url = base_url
    # NOTE(review): user info is always fetched from default_base_url rather
    # than the configured base_url — looks deliberate (test servers share the
    # production account database?) but confirm before changing.
    user_info_url = self.default_base_url + 'user/info?key=' + self.api_key
    self.check_api_key(user_info_url)
    # user id is kept as a string for later string-vs-string comparisons
    self.user_id = str(self.get(user_info_url)['id'])
def check_api_key(self, url):
    """ Raises IncorrectAPIKeyError unless the url answers with 200/201. """
    response = requests.get(
        url,
        headers={'Content-type': 'application/json'},
        auth=('scicrunch', 'perl22(query)'),  # for test2.scicrunch.org
    )
    if response.status_code not in [200, 201]:  # Safety catch.
        raise self.IncorrectAPIKeyError('api_key given is incorrect.')
def process_response(self, response: requests.models.Response) -> dict:
    """ Checks status codes and unwraps the 'data' field from the server reply. """
    status = response.status_code
    try:
        output = response.json()
    except json.JSONDecodeError:  # Server is having a bad day and crashed.
        raise self.BadResponseError(
            'Json not returned with status code [' + str(status) + ']')
    if status == 400:
        # 400 bodies carry an errormsg the callers inspect themselves
        return output
    if status != 200 and status != 201:  # Safety catch.
        raise self.BadResponseError(
            str(output) + ': with status code [' + str(status) +
            '] and params:' + str(output))
    return output['data']
def get(self, url: str) -> List[dict]:
    """ GETs url and returns the processed 'data' payload. """
    response = requests.get(
        url,
        headers={'Content-type': 'application/json'},
        auth=('scicrunch', 'perl22(query)'),  # for test2.scicrunch.org
    )
    return self.process_response(response)
def post(self, url: str, data: List[dict]) -> List[dict]:
    """ POSTs json-encoded data and returns the processed 'data' payload.

    NOTE: mutates the caller's dict in place by injecting the 'key' entry.
    """
    data.update({
        'key': self.api_key,
    })
    response = requests.post(
        url,
        data=json.dumps(data),
        headers={'Content-type': 'application/json'},
        auth=('scicrunch', 'perl22(query)'),  # for test2.scicrunch.org
    )
    return self.process_response(response)
def fix_ilx(self, ilx_id: str) -> str:
    """ Normalizes an ILX identifier to the lowercase/underscore form the db expects. """
    # FIXME probably .rsplit('/', 1) is the more correct version of this
    # and because this is nominally a 'private' interface these should be
    ilx_id = ilx_id.replace('http://uri.interlex.org/base/', '')
    if ilx_id[:4] not in ('TMP:', 'tmp_', 'ILX:', 'ilx_'):
        raise ValueError(
            'Need to provide ilx ID with format ilx_# or ILX:# for given ID ' + ilx_id)
    return ilx_id.replace('ILX:', 'ilx_').replace('TMP:', 'tmp_')
def process_synonyms(self, entity: List[dict]) -> List[dict]:
    """ Validates that every synonym is exactly {'literal': <value>}. """
    label = entity['label']
    for syn in entity['synonyms']:
        # these are internal invariants; users should never trip them
        if 'literal' not in syn:
            raise ValueError(f'Synonym not given a literal for label: {label}')
        if len(syn) > 1:
            raise ValueError(f'Too many keys in synonym for label: {label}')
    return entity
def process_existing_ids(self, entity: List[dict]) -> List[dict]:
    """ Validates that each existing_id holds exactly the keys 'iri' and 'curie'. """
    label = entity['label']
    for existing_id in entity['existing_ids']:
        if 'curie' not in existing_id or 'iri' not in existing_id:
            raise ValueError(
                f'Missing needing key(s) in existing_ids for label: {label}')
        if len(existing_id) > 2:
            raise ValueError(
                f'Extra keys not recognized in existing_ids for label: {label}')
    return entity
def crude_search_scicrunch_via_label(self, label: str) -> dict:
    """ Fuzzy server-side search; returns anything similar in any category. """
    url = '{base}term/search/{term}?key={key}'.format(
        base=self.base_url, term=label, key=self.api_key)
    return self.get(url)
def check_scicrunch_for_label(self, label: str) -> dict:
    """ Returns the full record of the entity with this label that YOU created, else {}.

    There can be multiples of the same label in InterLex, but there should only
    be one label with your user id. Therefore a label can be re-created when it
    already exists, but not when you were the one who created it.

    :param label: entity label, matched case- and whitespace-insensitively
    :returns: complete entity data dict if you already created it, otherwise {}
    """
    list_of_crude_matches = self.crude_search_scicrunch_via_label(label)
    for crude_match in list_of_crude_matches:
        # If labels match
        if crude_match['label'].lower().strip() == label.lower().strip():
            complete_data_of_crude_match = self.get_entity(crude_match['ilx'])
            # If label was created by you (ids compared as strings; server types vary)
            if str(self.user_id) == str(complete_data_of_crude_match['uid']):
                return complete_data_of_crude_match  # You created the entity already
    # No label AND user id match
    return {}
def get_entity(self, ilx_id: str) -> dict:
    """ Fetches full entity metadata (minus annotations/relationships) by ILX ID. """
    identifier = self.fix_ilx(ilx_id)
    url = '{base}ilx/search/identifier/{identifier}?key={key}'.format(
        base=self.base_url, identifier=identifier, key=self.api_key)
    return self.get(url)
def add_entity(
        self,
        label: str,
        type: str,
        definition: str = None,
        comment: str = None,
        superclass: str = None,
        synonyms: list = None) -> dict:
    """ Adds an entity to InterLex and returns a simplified record of what was stored.

    :param label: name of the entity
    :param type: one of term, cde, fde, pde, annotation, relationship
    :param definition: entity definition
    :param comment: footnote about the data or its interpretation
    :param superclass: ILX ID of the parent entity
    :param synonyms: list of synonym strings
    :returns: dict with keys iri, curie, ilx plus whichever inputs were given
    """
    # Snapshot of the truthy arguments; echoed back into the output below.
    # NOTE: must stay the first statement so locals() only sees the parameters.
    template_entity_input = {k:v for k, v in locals().items() if k != 'self' and v}
    if template_entity_input.get('superclass'):
        template_entity_input['superclass'] = self.fix_ilx(template_entity_input['superclass'])
    if not label:
        raise self.NoLabelError('Entity needs a label')
    if not type:
        raise self.NoTypeError('Entity needs a type')
    # Build the raw payload in the shape add_raw_entity expects
    entity_input = {
        'label': label,
        'type': type,
    }
    if definition:
        entity_input['definition'] = definition
    if comment:
        entity_input['comment'] = comment
    if superclass:
        entity_input['superclass'] = {'ilx_id':self.fix_ilx(superclass)}
    if synonyms:
        entity_input['synonyms'] = [{'literal': syn} for syn in synonyms]
    raw_entity_outout = self.add_raw_entity(entity_input)
    # Sanity check -> output same as input, but filled with response data
    entity_output = {}
    # Pick the canonical ilx_ iri/curie pair out of the server's existing_ids
    ics = [(e['iri'], e['curie'])
           for e in raw_entity_outout['existing_ids']]
    entity_output['iri'], entity_output['curie'] = sorted((i, c)
                                                          for i, c in ics
                                                          if 'ilx_' in i)[0]
    ### FOR NEW BETA. Old can have 'ilx_' in the ids ###
    if 'tmp' in raw_entity_outout['ilx']:
        _id = raw_entity_outout['ilx'].split('_')[-1]
        entity_output['iri'] = 'http://uri.interlex.org/base/tmp_' + _id
        entity_output['curie'] = 'TMP:' + _id
    # Echo each supplied field back using the server's stored values
    for key, value in template_entity_input.items():
        if key == 'superclass':
            entity_output[key] = raw_entity_outout['superclasses'][0]['ilx']
        elif key == 'synonyms':
            entity_output[key] = [syn['literal']
                                  for syn in raw_entity_outout['synonyms']]
        else:
            entity_output[key] = str(raw_entity_outout[key])
    # skip this for now, I check it downstream be cause I'm paranoid, but in this client it is
    # safe to assume that the value given will be the value returned if there is a return at all
    # it also isn't that they match exactly, because some new values (e.g. iri and curie) are expected
    #if entity_output != template_entity_input:
        # DEBUG: helps see what's wrong; might want to make a clean version of this
        # for key, value in template_entity_input.items():
        #     if template_entity_input[key] != entity_output[key]:
        #         print(template_entity_input[key], entity_output[key])
        #raise self.BadResponseError('The server did not return proper data!')
    # Expand fragments into full IRIs for the caller
    if entity_output.get('superclass'):
        entity_output['superclass'] = self.ilx_base_url + entity_output['superclass']
    entity_output['ilx'] = self.ilx_base_url + raw_entity_outout['ilx']
    return entity_output
def add_raw_entity(self, entity: dict) -> dict:
    """ Adds entity if it does not already exist under your user ID.

    Need to provide a list of dictionaries that have at least the key/values
    for label and type. If given a key, the values provided must be in the
    format shown in order for the server to accept them. You can input
    multiple synonyms, or existing_ids.

    Entity type can be any of the following: term, pde, fde, cde, annotation, or relationship

    Options Template:
        entity = {
            'label': '',
            'type': '',
            'definition': '',
            'comment': '',
            'superclass': {
                'ilx_id': ''
            },
            'synonyms': [
                {
                    'literal': ''
                },
            ],
            'existing_ids': [
                {
                    'iri': '',
                    'curie': '',
                },
            ],
        }

    Minimum Needed:
        entity = {
            'label': '',
            'type': '',
        }

    Example:
        entity = {
            'label': 'brain',
            'type': 'pde',
            'definition': 'Part of the central nervous system',
            'comment': 'Cannot live without it',
            'superclass': {
                'ilx_id': 'ilx_0108124', # ILX ID for Organ
            },
            'synonyms': [
                {
                    'literal': 'Encephalon'
                },
                {
                    'literal': 'Cerebro'
                },
            ],
            'existing_ids': [
                {
                    'iri': 'http://uri.neuinfo.org/nif/nifstd/birnlex_796',
                    'curie': 'BIRNLEX:796',
                },
            ],
        }
    """
    needed_in_entity = set([
        'label',
        'type',
    ])
    options_in_entity = set([
        'label',
        'type',
        'definition',
        'comment',
        'superclass',
        'synonyms',
        'existing_ids'
    ])
    prime_entity_url = self.base_url + 'ilx/add'
    add_entity_url = self.base_url + 'term/add'
    ### Checking if key/value format is correct ###
    # Seeing if you are missing a needed key
    if (set(entity) & needed_in_entity) != needed_in_entity:
        raise self.MissingKeyError(
            'You need key(s): '+ str(needed_in_entity - set(entity)))
    # Seeing if you have other options not included in the description
    elif (set(entity) | options_in_entity) != options_in_entity:
        raise self.IncorrectKeyError(
            'Unexpected key(s): ' + str(set(entity) - options_in_entity))
    entity['type'] = entity['type'].lower() # BUG: server only takes lowercase
    if entity['type'] not in ['term', 'relationship', 'annotation', 'cde', 'fde', 'pde']:
        raise TypeError(
            'Entity should be one of the following: ' +
            'term, relationship, annotation, cde, fde, pde')
    # Normalize nested structures into their server-side representations
    if entity.get('superclass'):
        entity = self.process_superclass(entity)
    if entity.get('synonyms'):
        entity = self.process_synonyms(entity)
    if entity.get('existing_ids'):
        entity = self.process_existing_ids(entity)
    entity['uid'] = self.user_id # BUG: php lacks uid update
    ### Adding entity to SciCrunch ###
    entity['term'] = entity.pop('label') # ilx/add nuance
    ilx_data = self.post(
        url = prime_entity_url,
        data = entity.copy(),
    ) # requesting spot in server for entity
    if ilx_data.get('ilx'):
        ilx_id = ilx_data['ilx']
    else:
        ilx_id = ilx_data['fragment'] # beta.scicrunch.org
    entity['label'] = entity.pop('term') # term/add nuance
    entity['ilx'] = ilx_id # need entity ilx_id to place entity in db
    output = self.post(
        url = add_entity_url,
        data = entity.copy(),
    ) # data represented in SciCrunch interface
    ### Checking if label already exists ###
    if output.get('errormsg'):
        if 'already exists' in output['errormsg'].lower():
            prexisting_data = self.check_scicrunch_for_label(entity['label'])
            if prexisting_data:
                print(
                    'You already added entity', entity['label'],
                    'with ILX ID:', prexisting_data['ilx'])
                return prexisting_data
            # BUGFIX: the exceptions below were constructed but never raised,
            # silently swallowing server errors and then failing later on
            # output['ilx'] with an unrelated KeyError.
            raise self.Error(output) # FIXME what is the correct error here?
        raise self.Error(output) # FIXME what is the correct error here?
    # BUG: server output incomplete compared to search via ilx ids
    output = self.get_entity(output['ilx'])
    return output
def update_entity(
        self,
        ilx_id: str,
        label: str = None,
        type: str = None,
        definition: str = None,
        comment: str = None,
        superclass: str = None,
        synonyms: list = None) -> dict:
    """ Updates pre-existing entity as long as the api_key is from the account that created it

    Args:
        ilx_id: InterLex ID of the entity to update (ilx_#/ILX:#/tmp_#/TMP:# form)
        label: name of entity
        type: entities type
            Can be any of the following: term, cde, fde, pde, annotation, relationship
        definition: entities definition
        comment: a foot note regarding either the interpretation of the data or the data itself
        superclass: entity is a sub-part of this entity
            Example: Organ is a superclass to Brain
        synonyms: entity synonyms

    Returns:
        Server response that is a nested dictionary format

    Raises:
        EntityDoesNotExistError: if ilx_id does not resolve to an existing entity
    """
    # Must be the first statement: snapshots the caller's arguments (minus self)
    # so the returned summary can be shaped around exactly what was requested.
    template_entity_input = {k:v for k, v in locals().copy().items() if k != 'self'}
    if template_entity_input.get('superclass'):
        template_entity_input['superclass'] = self.fix_ilx(template_entity_input['superclass'])

    existing_entity = self.get_entity(ilx_id=ilx_id)
    if not existing_entity['id']:  # TODO: Need to make a proper ilx_id check error
        raise self.EntityDoesNotExistError(
            f'ilx_id provided {ilx_id} does not exist')

    update_url = self.base_url + 'term/edit/{id}'.format(id=existing_entity['id'])

    # Overlay only the fields the caller actually supplied; everything else
    # keeps its current server-side value.
    if label:
        existing_entity['label'] = label
    if type:
        existing_entity['type'] = type
    if definition:
        existing_entity['definition'] = definition
    if comment:
        existing_entity['comment'] = comment
    if superclass:
        existing_entity['superclass'] = {'ilx_id': superclass}
        existing_entity = self.process_superclass(existing_entity)

    # If a synonym already exists server-side reuse its record (preserves its id),
    # else append a fresh literal.
    if synonyms:
        if existing_entity['synonyms']:
            new_existing_synonyms = []
            existing_synonyms = {syn['literal'].lower(): syn
                                 for syn in existing_entity['synonyms']}
            for synonym in synonyms:
                existing_synonym = existing_synonyms.get(synonym.lower())
                if not existing_synonym:
                    new_existing_synonyms.append({'literal': synonym})
                else:
                    new_existing_synonyms.append(existing_synonym)
            existing_entity['synonyms'] = new_existing_synonyms

    response = self.post(
        url = update_url,
        data = existing_entity,
    )

    # BUG: server response is bad and needs to actually search again to get proper format
    raw_entity_outout = self.get_entity(response['ilx'])

    # Build a compact summary keyed on what the caller asked for.
    entity_output = {}
    ics = [(e['iri'], e['curie'])
           for e in raw_entity_outout['existing_ids']]
    entity_output['iri'], entity_output['curie'] = sorted((i, c)
                                                          for i, c in ics
                                                          if 'ilx_' in i)[0]

    ### FOR NEW BETA. Old can have 'ilx_' in the ids ###
    if 'tmp' in raw_entity_outout['ilx']:
        _id = raw_entity_outout['ilx'].split('_')[-1]
        entity_output['iri'] = 'http://uri.interlex.org/base/tmp_' + _id
        entity_output['curie'] = 'TMP:' + _id

    # NOTE: leftover debug print(template_entity_input) removed here.
    for key, value in template_entity_input.items():
        if key == 'superclass':
            if raw_entity_outout.get('superclasses'):
                entity_output[key] = raw_entity_outout['superclasses'][0]['ilx']
        elif key == 'synonyms':
            entity_output[key] = [syn['literal']
                                  for syn in raw_entity_outout['synonyms']]
        elif key == 'ilx_id':
            pass
        else:
            entity_output[key] = str(raw_entity_outout[key])

    if entity_output.get('superclass'):
        entity_output['superclass'] = self.ilx_base_url + entity_output['superclass']
    entity_output['ilx'] = self.ilx_base_url + raw_entity_outout['ilx']
    return entity_output
def get_annotation_via_tid(self, tid: str) -> dict:
    """ Return the annotations anchored to the entity whose database id is *tid*. """
    endpoint = f'term/get-annotations/{tid}?key={self.api_key}'
    return self.get(self.base_url + endpoint)
def add_annotation(
        self,
        term_ilx_id: str,
        annotation_type_ilx_id: str,
        annotation_value: str) -> dict:
    """ Adding an annotation value to a prexisting entity

    An annotation exists as 3 different parts:
        1. entity with type term, cde, fde, or pde
        2. entity with type annotation
        3. string value of the annotation

    Args:
        term_ilx_id: ILX ID of the entity being annotated
        annotation_type_ilx_id: ILX ID of the annotation-type entity
        annotation_value: free-text value attached to the pair above

    Returns:
        Server record for the annotation; if the exact (type, value) pair
        already exists, the pre-existing record is returned instead.

    Example:
        annotation = {
            'term_ilx_id': 'ilx_0101431', # brain ILX ID
            'annotation_type_ilx_id': 'ilx_0381360', # hasDbXref ILX ID
            'annotation_value': 'http://neurolex.org/wiki/birnlex_796',
        }
    """
    url = self.base_url + 'term/add-annotation'
    # Both endpoints must resolve before linking; exit() raises SystemExit on failure.
    # NOTE(review): a client-specific exception (self.Error subclass) would be kinder to callers.
    term_data = self.get_entity(term_ilx_id)
    if not term_data['id']:
        exit(
            'term_ilx_id: ' + term_ilx_id + ' does not exist'
        )
    anno_data = self.get_entity(annotation_type_ilx_id)
    if not anno_data['id']:
        exit(
            'annotation_type_ilx_id: ' + annotation_type_ilx_id +
            ' does not exist'
        )
    # Version fields are required by the server for its edit bookkeeping.
    data = {
        'tid': term_data['id'],
        'annotation_tid': anno_data['id'],
        'value': annotation_value,
        'term_version': term_data['version'],
        'annotation_term_version': anno_data['version'],
        'orig_uid': self.user_id, # BUG: php lacks orig_uid update
    }
    output = self.post(
        url = url,
        data = data,
    )
    ### If already exists, we return the actual annotation properly ###
    if output.get('errormsg'):
        if 'already exists' in output['errormsg'].lower():
            term_annotations = self.get_annotation_via_tid(term_data['id'])
            # Look for the exact (annotation type, value) pair among the term's annotations.
            for term_annotation in term_annotations:
                if str(term_annotation['annotation_tid']) == str(anno_data['id']):
                    if term_annotation['value'] == data['value']:
                        print(
                            'Annotation: [' + term_data['label'] + ' -> ' + anno_data['label'] +
                            ' -> ' + data['value'] + '], already exists.'
                        )
                        return term_annotation
            # "already exists" reported but no matching record found — abort.
            exit(output)
        # Any other server error message — abort.
        exit(output)
    return output
def delete_annotation(
        self,
        term_ilx_id: str,
        annotation_type_ilx_id: str,
        annotation_value: str) -> dict:
    """ Deletes the annotation matching (term, annotation type, value), if it exists.

    (Original docstring said "If annotation doesnt exist, add it" — that was a
    copy-paste error; this method only removes.)

    Args:
        term_ilx_id: ILX ID of the annotated entity
        annotation_type_ilx_id: ILX ID of the annotation-type entity
        annotation_value: exact value of the annotation to remove

    Returns:
        Server response for the edit, or None when no matching annotation exists.
    """
    term_data = self.get_entity(term_ilx_id)
    if not term_data['id']:
        exit(
            'term_ilx_id: ' + term_ilx_id + ' does not exist'
        )
    anno_data = self.get_entity(annotation_type_ilx_id)
    if not anno_data['id']:
        exit(
            'annotation_type_ilx_id: ' + annotation_type_ilx_id +
            ' does not exist'
        )
    # Find the concrete annotation row whose (tid, annotation_tid, value) all match.
    entity_annotations = self.get_annotation_via_tid(term_data['id'])
    annotation_id = ''
    for annotation in entity_annotations:
        if str(annotation['tid']) == str(term_data['id']):
            if str(annotation['annotation_tid']) == str(anno_data['id']):
                if str(annotation['value']) == str(annotation_value):
                    annotation_id = annotation['id']
                    break
    if not annotation_id:
        print('''WARNING: Annotation you wanted to delete does not exist ''')
        return None
    url = self.base_url + 'term/edit-annotation/{annotation_id}'.format(
        annotation_id = annotation_id
    )
    # Blanking the fields via the edit endpoint is how the server deletes.
    data = {
        'tid': ' ', # for delete
        'annotation_tid': ' ', # for delete
        'value': ' ', # for delete
        'term_version': ' ',
        'annotation_term_version': ' ',
    }
    output = self.post(
        url = url,
        data = data,
    )
    # check output
    return output
def get_relationship_via_tid(self, tid: str) -> dict:
    """ Return the relationships anchored to the entity whose database id is *tid*. """
    endpoint = f'term/get-relationships/{tid}?key={self.api_key}'
    return self.get(self.base_url + endpoint)
def add_relationship(
        self,
        entity1_ilx: str,
        relationship_ilx: str,
        entity2_ilx: str) -> dict:
    """ Adds relationship connection in Interlex

    A relationship exists as 3 different parts:
        1. entity with type term, cde, fde, or pde
        2. entity with type relationship that connects entity1 to entity2
            -> Has its' own meta data, so no value needed
        3. entity with type term, cde, fde, or pde

    Args:
        entity1_ilx: ILX ID of the relationship's subject entity
        relationship_ilx: ILX ID of the relationship-type entity
        entity2_ilx: ILX ID of the relationship's object entity

    Returns:
        Server record for the relationship; if the triple already exists,
        the pre-existing record is returned instead.
    """
    url = self.base_url + 'term/add-relationship'
    # All three endpoints must resolve; exit() raises SystemExit on failure.
    entity1_data = self.get_entity(entity1_ilx)
    if not entity1_data['id']:
        exit(
            'entity1_ilx: ' + entity1_ilx + ' does not exist'
        )
    relationship_data = self.get_entity(relationship_ilx)
    if not relationship_data['id']:
        exit(
            'relationship_ilx: ' + relationship_ilx + ' does not exist'
        )
    entity2_data = self.get_entity(entity2_ilx)
    if not entity2_data['id']:
        exit(
            'entity2_ilx: ' + entity2_ilx + ' does not exist'
        )
    # Version fields are required by the server for its edit bookkeeping.
    data = {
        'term1_id': entity1_data['id'],
        'relationship_tid': relationship_data['id'],
        'term2_id': entity2_data['id'],
        'term1_version': entity1_data['version'],
        'term2_version': entity2_data['version'],
        'relationship_term_version': relationship_data['version'],
        'orig_uid': self.user_id, # BUG: php lacks orig_uid update
    }
    output = self.post(
        url = url,
        data = data,
    )
    ### If already exists, we return the actual relationship properly ###
    if output.get('errormsg'):
        if 'already exists' in output['errormsg'].lower():
            term_relationships = self.get_relationship_via_tid(entity1_data['id'])
            # Look for the same (term2, relationship type) pair among entity1's relationships.
            for term_relationship in term_relationships:
                if str(term_relationship['term2_id']) == str(entity2_data['id']):
                    if term_relationship['relationship_tid'] == relationship_data['id']:
                        print(
                            'relationship: [' + entity1_data['label'] + ' -> ' +
                            relationship_data['label'] + ' -> ' + entity2_data['label'] +
                            '], already exists.'
                        )
                        return term_relationship
            # "already exists" reported but no matching record found — abort.
            exit(output)
        # Any other server error message — abort.
        exit(output)
    return output
def delete_relationship(
        self,
        entity1_ilx: str,
        relationship_ilx: str,
        entity2_ilx: str) -> dict:
    """ Deletes the relationship triple (entity1, relationship, entity2), if it exists.

    A relationship exists as 3 different parts:
        1. entity with type term, cde, fde, or pde
        2. entity with type relationship that connects entity1 to entity2
        3. entity with type term, cde, fde, or pde

    Args:
        entity1_ilx: ILX ID of the relationship's subject entity
        relationship_ilx: ILX ID of the relationship-type entity
        entity2_ilx: ILX ID of the relationship's object entity

    Returns:
        Server response for the edit, or None when no matching relationship exists.
    """
    entity1_data = self.get_entity(entity1_ilx)
    if not entity1_data['id']:
        # FIX: previously concatenated the dict entity1_data into the message,
        # which raised TypeError instead of reporting the missing ID.
        exit(
            'entity1_ilx: ' + entity1_ilx + ' does not exist'
        )
    relationship_data = self.get_entity(relationship_ilx)
    if not relationship_data['id']:
        exit(
            'relationship_ilx: ' + relationship_ilx + ' does not exist'
        )
    entity2_data = self.get_entity(entity2_ilx)
    if not entity2_data['id']:
        # FIX: same dict-concatenation bug as entity1 above.
        exit(
            'entity2_ilx: ' + entity2_ilx + ' does not exist'
        )
    # Blanked id fields tell the server this is a delete; versions are still required.
    data = {
        'term1_id': ' ', #entity1_data['id'],
        'relationship_tid': ' ', #relationship_data['id'],
        'term2_id': ' ',#entity2_data['id'],
        'term1_version': entity1_data['version'],
        'term2_version': entity2_data['version'],
        'relationship_term_version': relationship_data['version'],
        'orig_uid': self.user_id, # BUG: php lacks orig_uid update
    }
    # Find the concrete relationship row matching all three ids.
    entity_relationships = self.get_relationship_via_tid(entity1_data['id'])
    relationship_id = None
    for relationship in entity_relationships:
        if str(relationship['term1_id']) == str(entity1_data['id']):
            if str(relationship['term2_id']) == str(entity2_data['id']):
                if str(relationship['relationship_tid']) == str(relationship_data['id']):
                    relationship_id = relationship['id']
                    break
    if not relationship_id:
        # FIX: warning previously said "Annotation" (copy-paste from delete_annotation).
        print('''WARNING: Relationship you wanted to delete does not exist ''')
        return None
    url = self.base_url + 'term/edit-relationship/{id}'.format(id=relationship_id)
    output = self.post(
        url = url,
        data = data,
    )
    return output
|
tgbugs/ontquery | ontquery/plugins/interlex_client.py | InterLexClient.process_synonyms | python | def process_synonyms(self, entity: List[dict]) -> List[dict]:
label = entity['label']
for synonym in entity['synonyms']:
# these are internal errors and users should never see them
if 'literal' not in synonym:
raise ValueError(f'Synonym not given a literal for label: {label}')
elif len(synonym) > 1:
raise ValueError(f'Too many keys in synonym for label: {label}')
return entity | Making sure key/value is in proper format for synonyms in entity | train | https://github.com/tgbugs/ontquery/blob/bcf4863cb2bf221afe2b093c5dc7da1377300041/ontquery/plugins/interlex_client.py#L130-L139 | null | class InterLexClient:
""" Connects to SciCrunch via its' api endpoints
Purpose is to allow external curators to add entities and annotations to those entities.
Functions To Use:
add_entity
add_annotation
Notes On Time Complexity:
Function add_entity, if added an entity successfully, will hit a least 5 endpoints. This
may cause it to take a few seconds to load each entity into SciCrunch. Function
add_annotation is a little more forgiving with it only hitting 3 minimum.
"""
class Error(Exception): pass
class SuperClassDoesNotExistError(Error):
""" The superclass listed does not exist! """
class EntityDoesNotExistError(Error):
""" The entity listed does not exist! """
class BadResponseError(Error): pass
class NoLabelError(Error): pass
class NoTypeError(Error): pass
class MissingKeyError(Error): pass
class IncorrectKeyError(Error): pass
class IncorrectAPIKeyError(Error): pass
default_base_url = 'https://scicrunch.org/api/1/'
ilx_base_url = 'http://uri.interlex.org/base/'
def __init__(self, api_key: str, base_url: str = default_base_url):
self.api_key = api_key
self.base_url = base_url
user_info_url = self.default_base_url + 'user/info?key=' + self.api_key
self.check_api_key(user_info_url)
self.user_id = str(self.get(user_info_url)['id'])
def check_api_key(self, url):
response = requests.get(
url,
headers = {'Content-type': 'application/json'},
auth = ('scicrunch', 'perl22(query)') # for test2.scicrunch.org
)
if response.status_code not in [200, 201]: # Safety catch.
raise self.IncorrectAPIKeyError('api_key given is incorrect.')
def process_response(self, response: requests.models.Response) -> dict:
""" Checks for correct data response and status codes """
try:
output = response.json()
except json.JSONDecodeError: # Server is having a bad day and crashed.
raise self.BadResponseError(
'Json not returned with status code [' + str(response.status_code) + ']')
if response.status_code == 400:
return output
if response.status_code not in [200, 201]: # Safety catch.
raise self.BadResponseError(
str(output) + ': with status code [' + str(response.status_code) +
'] and params:' + str(output))
return output['data']
def get(self, url: str) -> List[dict]:
""" Requests data from database """
response = requests.get(
url,
headers = {'Content-type': 'application/json'},
auth = ('scicrunch', 'perl22(query)') # for test2.scicrunch.org
)
output = self.process_response(response)
return output
def post(self, url: str, data: List[dict]) -> List[dict]:
""" Gives data to database """
data.update({
'key': self.api_key,
})
response = requests.post(
url,
data = json.dumps(data),
headers = {'Content-type': 'application/json'},
auth = ('scicrunch', 'perl22(query)') # for test2.scicrunch.org
)
output = self.process_response(response)
return output
def fix_ilx(self, ilx_id: str) -> str:
""" Database only excepts lower case and underscore version of ID """
# FIXME probably .rsplit('/', 1) is the more correct version of this
# and because this is nominally a 'private' interface these should be
ilx_id = ilx_id.replace('http://uri.interlex.org/base/', '')
if ilx_id[:4] not in ['TMP:', 'tmp_', 'ILX:', 'ilx_']:
raise ValueError(
'Need to provide ilx ID with format ilx_# or ILX:# for given ID ' + ilx_id)
return ilx_id.replace('ILX:', 'ilx_').replace('TMP:', 'tmp_')
def process_superclass(self, entity: List[dict]) -> List[dict]:
""" Replaces ILX ID with superclass ID """
superclass = entity.pop('superclass')
label = entity['label']
if not superclass.get('ilx_id'):
raise self.SuperClassDoesNotExistError(
f'Superclass not given an interlex ID for label: {label}')
superclass_data = self.get_entity(superclass['ilx_id'])
if not superclass_data['id']:
raise self.SuperClassDoesNotExistError(
'Superclass ILX ID: ' + superclass['ilx_id'] + ' does not exist in SciCrunch')
# BUG: only excepts superclass_tid
entity['superclasses'] = [{'superclass_tid': superclass_data['id']}]
return entity
def process_existing_ids(self, entity: List[dict]) -> List[dict]:
""" Making sure key/value is in proper format for existing_ids in entity """
label = entity['label']
existing_ids = entity['existing_ids']
for existing_id in existing_ids:
if 'curie' not in existing_id or 'iri' not in existing_id:
raise ValueError(
f'Missing needing key(s) in existing_ids for label: {label}')
elif len(existing_id) > 2:
raise ValueError(
f'Extra keys not recognized in existing_ids for label: {label}')
return entity
def crude_search_scicrunch_via_label(self, label:str) -> dict:
""" Server returns anything that is simlar in any catagory """
url = self.base_url + 'term/search/{term}?key={api_key}'.format(
term = label,
api_key = self.api_key,
)
return self.get(url)
def check_scicrunch_for_label(self, label: str) -> dict:
""" Sees if label with your user ID already exists
There are can be multiples of the same label in interlex, but there should only be one
label with your user id. Therefore you can create labels if there already techniqually
exist, but not if you are the one to create it.
"""
list_of_crude_matches = self.crude_search_scicrunch_via_label(label)
for crude_match in list_of_crude_matches:
# If labels match
if crude_match['label'].lower().strip() == label.lower().strip():
complete_data_of_crude_match = self.get_entity(crude_match['ilx'])
crude_match_label = crude_match['label']
crude_match_user_id = complete_data_of_crude_match['uid']
# If label was created by you
if str(self.user_id) == str(crude_match_user_id):
return complete_data_of_crude_match # You created the entity already
# No label AND user id match
return {}
def get_entity(self, ilx_id: str) -> dict:
""" Gets full meta data (expect their annotations and relationships) from is ILX ID """
ilx_id = self.fix_ilx(ilx_id)
url = self.base_url + "ilx/search/identifier/{identifier}?key={api_key}".format(
identifier = ilx_id,
api_key = self.api_key,
)
return self.get(url)
def add_entity(
self,
label: str,
type: str,
definition: str = None,
comment: str = None,
superclass: str = None,
synonyms: list = None) -> dict:
template_entity_input = {k:v for k, v in locals().items() if k != 'self' and v}
if template_entity_input.get('superclass'):
template_entity_input['superclass'] = self.fix_ilx(template_entity_input['superclass'])
if not label:
raise self.NoLabelError('Entity needs a label')
if not type:
raise self.NoTypeError('Entity needs a type')
entity_input = {
'label': label,
'type': type,
}
if definition:
entity_input['definition'] = definition
if comment:
entity_input['comment'] = comment
if superclass:
entity_input['superclass'] = {'ilx_id':self.fix_ilx(superclass)}
if synonyms:
entity_input['synonyms'] = [{'literal': syn} for syn in synonyms]
raw_entity_outout = self.add_raw_entity(entity_input)
# Sanity check -> output same as input, but filled with response data
entity_output = {}
ics = [(e['iri'], e['curie'])
for e in raw_entity_outout['existing_ids']]
entity_output['iri'], entity_output['curie'] = sorted((i, c)
for i, c in ics
if 'ilx_' in i)[0]
### FOR NEW BETA. Old can have 'ilx_' in the ids ###
if 'tmp' in raw_entity_outout['ilx']:
_id = raw_entity_outout['ilx'].split('_')[-1]
entity_output['iri'] = 'http://uri.interlex.org/base/tmp_' + _id
entity_output['curie'] = 'TMP:' + _id
for key, value in template_entity_input.items():
if key == 'superclass':
entity_output[key] = raw_entity_outout['superclasses'][0]['ilx']
elif key == 'synonyms':
entity_output[key] = [syn['literal']
for syn in raw_entity_outout['synonyms']]
else:
entity_output[key] = str(raw_entity_outout[key])
# skip this for now, I check it downstream be cause I'm paranoid, but in this client it is
# safe to assume that the value given will be the value returned if there is a return at all
# it also isn't that they match exactly, because some new values (e.g. iri and curie) are expected
#if entity_output != template_entity_input:
# DEBUG: helps see what's wrong; might want to make a clean version of this
# for key, value in template_entity_input.items():
# if template_entity_input[key] != entity_output[key]:
# print(template_entity_input[key], entity_output[key])
#raise self.BadResponseError('The server did not return proper data!')
if entity_output.get('superclass'):
entity_output['superclass'] = self.ilx_base_url + entity_output['superclass']
entity_output['ilx'] = self.ilx_base_url + raw_entity_outout['ilx']
return entity_output
def add_raw_entity(self, entity: dict) -> dict:
""" Adds entity if it does not already exist under your user ID.
Need to provide a list of dictionaries that have at least the key/values
for label and type. If given a key, the values provided must be in the
format shown in order for the server to except them. You can input
multiple synonyms, or existing_ids.
Entity type can be any of the following: term, pde, fde, cde, annotation, or relationship
Options Template:
entity = {
'label': '',
'type': '',
'definition': '',
'comment': '',
'superclass': {
'ilx_id': ''
},
'synonyms': [
{
'literal': ''
},
],
'existing_ids': [
{
'iri': '',
'curie': '',
},
],
}
Minimum Needed:
entity = {
'label': '',
'type': '',
}
Example:
entity = {
'label': 'brain',
'type': 'pde',
'definition': 'Part of the central nervous system',
'comment': 'Cannot live without it',
'superclass': {
'ilx_id': 'ilx_0108124', # ILX ID for Organ
},
'synonyms': [
{
'literal': 'Encephalon'
},
{
'literal': 'Cerebro'
},
],
'existing_ids': [
{
'iri': 'http://uri.neuinfo.org/nif/nifstd/birnlex_796',
'curie': 'BIRNLEX:796',
},
],
}
"""
needed_in_entity = set([
'label',
'type',
])
options_in_entity = set([
'label',
'type',
'definition',
'comment',
'superclass',
'synonyms',
'existing_ids'
])
prime_entity_url = self.base_url + 'ilx/add'
add_entity_url = self.base_url + 'term/add'
### Checking if key/value format is correct ###
# Seeing if you are missing a needed key
if (set(entity) & needed_in_entity) != needed_in_entity:
raise self.MissingKeyError(
'You need key(s): '+ str(needed_in_entity - set(entity)))
# Seeing if you have other options not included in the description
elif (set(entity) | options_in_entity) != options_in_entity:
raise self.IncorrectKeyError(
'Unexpected key(s): ' + str(set(entity) - options_in_entity))
entity['type'] = entity['type'].lower() # BUG: server only takes lowercase
if entity['type'] not in ['term', 'relationship', 'annotation', 'cde', 'fde', 'pde']:
raise TypeError(
'Entity should be one of the following: ' +
'term, relationship, annotation, cde, fde, pde')
if entity.get('superclass'):
entity = self.process_superclass(entity)
if entity.get('synonyms'):
entity = self.process_synonyms(entity)
if entity.get('existing_ids'):
entity = self.process_existing_ids(entity)
entity['uid'] = self.user_id # BUG: php lacks uid update
### Adding entity to SciCrunch ###
entity['term'] = entity.pop('label') # ilx/add nuance
ilx_data = self.post(
url = prime_entity_url,
data = entity.copy(),
) # requesting spot in server for entity
if ilx_data.get('ilx'):
ilx_id = ilx_data['ilx']
else:
ilx_id = ilx_data['fragment'] # beta.scicrunch.org
entity['label'] = entity.pop('term') # term/add nuance
entity['ilx'] = ilx_id # need entity ilx_id to place entity in db
output = self.post(
url = add_entity_url,
data = entity.copy(),
) # data represented in SciCrunch interface
### Checking if label already exisits ###
if output.get('errormsg'):
if 'already exists' in output['errormsg'].lower():
prexisting_data = self.check_scicrunch_for_label(entity['label'])
if prexisting_data:
print(
'You already added entity', entity['label'],
'with ILX ID:', prexisting_data['ilx'])
return prexisting_data
self.Error(output) # FIXME what is the correct error here?
self.Error(output) # FIXME what is the correct error here?
# BUG: server output incomplete compared to search via ilx ids
output = self.get_entity(output['ilx'])
return output
def update_entity(
self,
ilx_id: str,
label: str = None,
type: str = None,
definition: str = None,
comment: str = None,
superclass: str = None,
synonyms: list = None) -> dict:
""" Updates pre-existing entity as long as the api_key is from the account that created it
Args:
label: name of entity
type: entities type
Can be any of the following: term, cde, fde, pde, annotation, relationship
definition: entities definition
comment: a foot note regarding either the interpretation of the data or the data itself
superclass: entity is a sub-part of this entity
Example: Organ is a superclass to Brain
synonyms: entity synonyms
Returns:
Server response that is a nested dictionary format
"""
template_entity_input = {k:v for k, v in locals().copy().items() if k != 'self'}
if template_entity_input.get('superclass'):
template_entity_input['superclass'] = self.fix_ilx(template_entity_input['superclass'])
existing_entity = self.get_entity(ilx_id=ilx_id)
if not existing_entity['id']: # TODO: Need to make a proper ilx_id check error
raise self.EntityDoesNotExistError(
f'ilx_id provided {ilx_id} does not exist')
update_url = self.base_url + 'term/edit/{id}'.format(id=existing_entity['id'])
if label:
existing_entity['label'] = label
if type:
existing_entity['type'] = type
if definition:
existing_entity['definition'] = definition
if comment:
existing_entity['comment'] = comment
if superclass:
existing_entity['superclass'] = {'ilx_id': superclass}
existing_entity = self.process_superclass(existing_entity)
# If a match use old data, else append new synonym
if synonyms:
if existing_entity['synonyms']:
new_existing_synonyms = []
existing_synonyms = {syn['literal'].lower():syn for syn in existing_entity['synonyms']}
for synonym in synonyms:
existing_synonym = existing_synonyms.get(synonym.lower())
if not existing_synonym:
new_existing_synonyms.append({'literal': synonym})
else:
new_existing_synonyms.append(existing_synonym)
existing_entity['synonyms'] = new_existing_synonyms
# Just in case I need this...
# if synonyms_to_delete:
# if existing_entity['synonyms']:
# remaining_existing_synonyms = []
# existing_synonyms = {syn['literal'].lower():syn for syn in existing_entity['synonyms']}
# for synonym in synonyms:
# if existing_synonyms.get(synonym.lower()):
# existing_synonyms.pop(synonym.lower())
# else:
# print('WARNING: synonym you wanted to delete', synonym, 'does not exist')
# existing_entity['synonyms'] = list(existing_synonyms.values())
response = self.post(
url = update_url,
data = existing_entity,
)
# BUG: server response is bad and needs to actually search again to get proper format
raw_entity_outout = self.get_entity(response['ilx'])
entity_output = {}
ics = [(e['iri'], e['curie'])
for e in raw_entity_outout['existing_ids']]
entity_output['iri'], entity_output['curie'] = sorted((i, c)
for i, c in ics
if 'ilx_' in i)[0]
### FOR NEW BETA. Old can have 'ilx_' in the ids ###
if 'tmp' in raw_entity_outout['ilx']:
_id = raw_entity_outout['ilx'].split('_')[-1]
entity_output['iri'] = 'http://uri.interlex.org/base/tmp_' + _id
entity_output['curie'] = 'TMP:' + _id
print(template_entity_input)
for key, value in template_entity_input.items():
if key == 'superclass':
if raw_entity_outout.get('superclasses'):
entity_output[key] = raw_entity_outout['superclasses'][0]['ilx']
elif key == 'synonyms':
entity_output[key] = [syn['literal']
for syn in raw_entity_outout['synonyms']]
elif key == 'ilx_id':
pass
else:
entity_output[key] = str(raw_entity_outout[key])
if entity_output.get('superclass'):
entity_output['superclass'] = self.ilx_base_url + entity_output['superclass']
entity_output['ilx'] = self.ilx_base_url + raw_entity_outout['ilx']
return entity_output
def get_annotation_via_tid(self, tid: str) -> dict:
""" Gets annotation via anchored entity id """
url = self.base_url + 'term/get-annotations/{tid}?key={api_key}'.format(
tid = tid,
api_key = self.api_key,
)
return self.get(url)
def add_annotation(
self,
term_ilx_id: str,
annotation_type_ilx_id: str,
annotation_value: str) -> dict:
""" Adding an annotation value to a prexisting entity
An annotation exists as 3 different parts:
1. entity with type term, cde, fde, or pde
2. entity with type annotation
3. string value of the annotation
Example:
annotation = {
'term_ilx_id': 'ilx_0101431', # brain ILX ID
'annotation_type_ilx_id': 'ilx_0381360', # hasDbXref ILX ID
'annotation_value': 'http://neurolex.org/wiki/birnlex_796',
}
"""
url = self.base_url + 'term/add-annotation'
term_data = self.get_entity(term_ilx_id)
if not term_data['id']:
exit(
'term_ilx_id: ' + term_ilx_id + ' does not exist'
)
anno_data = self.get_entity(annotation_type_ilx_id)
if not anno_data['id']:
exit(
'annotation_type_ilx_id: ' + annotation_type_ilx_id +
' does not exist'
)
data = {
'tid': term_data['id'],
'annotation_tid': anno_data['id'],
'value': annotation_value,
'term_version': term_data['version'],
'annotation_term_version': anno_data['version'],
'orig_uid': self.user_id, # BUG: php lacks orig_uid update
}
output = self.post(
url = url,
data = data,
)
### If already exists, we return the actual annotation properly ###
if output.get('errormsg'):
if 'already exists' in output['errormsg'].lower():
term_annotations = self.get_annotation_via_tid(term_data['id'])
for term_annotation in term_annotations:
if str(term_annotation['annotation_tid']) == str(anno_data['id']):
if term_annotation['value'] == data['value']:
print(
'Annotation: [' + term_data['label'] + ' -> ' + anno_data['label'] +
' -> ' + data['value'] + '], already exists.'
)
return term_annotation
exit(output)
exit(output)
return output
def delete_annotation(
self,
term_ilx_id: str,
annotation_type_ilx_id: str,
annotation_value: str) -> dict:
""" If annotation doesnt exist, add it
"""
term_data = self.get_entity(term_ilx_id)
if not term_data['id']:
exit(
'term_ilx_id: ' + term_ilx_id + ' does not exist'
)
anno_data = self.get_entity(annotation_type_ilx_id)
if not anno_data['id']:
exit(
'annotation_type_ilx_id: ' + annotation_type_ilx_id +
' does not exist'
)
entity_annotations = self.get_annotation_via_tid(term_data['id'])
annotation_id = ''
for annotation in entity_annotations:
if str(annotation['tid']) == str(term_data['id']):
if str(annotation['annotation_tid']) == str(anno_data['id']):
if str(annotation['value']) == str(annotation_value):
annotation_id = annotation['id']
break
if not annotation_id:
print('''WARNING: Annotation you wanted to delete does not exist ''')
return None
url = self.base_url + 'term/edit-annotation/{annotation_id}'.format(
annotation_id = annotation_id
)
data = {
'tid': ' ', # for delete
'annotation_tid': ' ', # for delete
'value': ' ', # for delete
'term_version': ' ',
'annotation_term_version': ' ',
}
output = self.post(
url = url,
data = data,
)
# check output
return output
def get_relationship_via_tid(self, tid: str) -> dict:
url = self.base_url + 'term/get-relationships/{tid}?key={api_key}'.format(
tid = tid,
api_key = self.api_key,
)
return self.get(url)
def add_relationship(
self,
entity1_ilx: str,
relationship_ilx: str,
entity2_ilx: str) -> dict:
""" Adds relationship connection in Interlex
A relationship exists as 3 different parts:
1. entity with type term, cde, fde, or pde
2. entity with type relationship that connects entity1 to entity2
-> Has its' own meta data, so no value needed
3. entity with type term, cde, fde, or pde
"""
url = self.base_url + 'term/add-relationship'
entity1_data = self.get_entity(entity1_ilx)
if not entity1_data['id']:
exit(
'entity1_ilx: ' + entity1_ilx + ' does not exist'
)
relationship_data = self.get_entity(relationship_ilx)
if not relationship_data['id']:
exit(
'relationship_ilx: ' + relationship_ilx + ' does not exist'
)
entity2_data = self.get_entity(entity2_ilx)
if not entity2_data['id']:
exit(
'entity2_ilx: ' + entity2_ilx + ' does not exist'
)
data = {
'term1_id': entity1_data['id'],
'relationship_tid': relationship_data['id'],
'term2_id': entity2_data['id'],
'term1_version': entity1_data['version'],
'term2_version': entity2_data['version'],
'relationship_term_version': relationship_data['version'],
'orig_uid': self.user_id, # BUG: php lacks orig_uid update
}
output = self.post(
url = url,
data = data,
)
### If already exists, we return the actual relationship properly ###
if output.get('errormsg'):
if 'already exists' in output['errormsg'].lower():
term_relationships = self.get_relationship_via_tid(entity1_data['id'])
for term_relationship in term_relationships:
if str(term_relationship['term2_id']) == str(entity2_data['id']):
if term_relationship['relationship_tid'] == relationship_data['id']:
print(
'relationship: [' + entity1_data['label'] + ' -> ' +
relationship_data['label'] + ' -> ' + entity2_data['label'] +
'], already exists.'
)
return term_relationship
exit(output)
exit(output)
return output
def delete_relationship(
self,
entity1_ilx: str,
relationship_ilx: str,
entity2_ilx: str) -> dict:
""" Adds relationship connection in Interlex
A relationship exists as 3 different parts:
1. entity with type term, cde, fde, or pde
2. entity with type relationship that connects entity1 to entity2
-> Has its' own meta data, so no value needed
3. entity with type term, cde, fde, or pde
"""
entity1_data = self.get_entity(entity1_ilx)
if not entity1_data['id']:
exit(
'entity1_ilx: ' + entity1_data + ' does not exist'
)
relationship_data = self.get_entity(relationship_ilx)
if not relationship_data['id']:
exit(
'relationship_ilx: ' + relationship_ilx + ' does not exist'
)
entity2_data = self.get_entity(entity2_ilx)
if not entity2_data['id']:
exit(
'entity2_ilx: ' + entity2_data + ' does not exist'
)
data = {
'term1_id': ' ', #entity1_data['id'],
'relationship_tid': ' ', #relationship_data['id'],
'term2_id': ' ',#entity2_data['id'],
'term1_version': entity1_data['version'],
'term2_version': entity2_data['version'],
'relationship_term_version': relationship_data['version'],
'orig_uid': self.user_id, # BUG: php lacks orig_uid update
}
entity_relationships = self.get_relationship_via_tid(entity1_data['id'])
# TODO: parse through entity_relationships to see if we have a match; else print warning and return None
relationship_id = None
for relationship in entity_relationships:
if str(relationship['term1_id']) == str(entity1_data['id']):
if str(relationship['term2_id']) == str(entity2_data['id']):
if str(relationship['relationship_tid']) == str(relationship_data['id']):
relationship_id = relationship['id']
break
if not relationship_id:
print('''WARNING: Annotation you wanted to delete does not exist ''')
return None
url = self.base_url + 'term/edit-relationship/{id}'.format(id=relationship_id)
output = self.post(
url = url,
data = data,
)
return output
|
tgbugs/ontquery | ontquery/plugins/interlex_client.py | InterLexClient.process_existing_ids | python | def process_existing_ids(self, entity: List[dict]) -> List[dict]:
label = entity['label']
existing_ids = entity['existing_ids']
for existing_id in existing_ids:
if 'curie' not in existing_id or 'iri' not in existing_id:
raise ValueError(
f'Missing needing key(s) in existing_ids for label: {label}')
elif len(existing_id) > 2:
raise ValueError(
f'Extra keys not recognized in existing_ids for label: {label}')
return entity | Making sure key/value is in proper format for existing_ids in entity | train | https://github.com/tgbugs/ontquery/blob/bcf4863cb2bf221afe2b093c5dc7da1377300041/ontquery/plugins/interlex_client.py#L141-L152 | null | class InterLexClient:
""" Connects to SciCrunch via its' api endpoints
Purpose is to allow external curators to add entities and annotations to those entities.
Functions To Use:
add_entity
add_annotation
Notes On Time Complexity:
Function add_entity, if added an entity successfully, will hit a least 5 endpoints. This
may cause it to take a few seconds to load each entity into SciCrunch. Function
add_annotation is a little more forgiving with it only hitting 3 minimum.
"""
class Error(Exception): pass
class SuperClassDoesNotExistError(Error):
""" The superclass listed does not exist! """
class EntityDoesNotExistError(Error):
""" The entity listed does not exist! """
class BadResponseError(Error): pass
class NoLabelError(Error): pass
class NoTypeError(Error): pass
class MissingKeyError(Error): pass
class IncorrectKeyError(Error): pass
class IncorrectAPIKeyError(Error): pass
default_base_url = 'https://scicrunch.org/api/1/'
ilx_base_url = 'http://uri.interlex.org/base/'
def __init__(self, api_key: str, base_url: str = default_base_url):
self.api_key = api_key
self.base_url = base_url
user_info_url = self.default_base_url + 'user/info?key=' + self.api_key
self.check_api_key(user_info_url)
self.user_id = str(self.get(user_info_url)['id'])
def check_api_key(self, url):
response = requests.get(
url,
headers = {'Content-type': 'application/json'},
auth = ('scicrunch', 'perl22(query)') # for test2.scicrunch.org
)
if response.status_code not in [200, 201]: # Safety catch.
raise self.IncorrectAPIKeyError('api_key given is incorrect.')
def process_response(self, response: requests.models.Response) -> dict:
""" Checks for correct data response and status codes """
try:
output = response.json()
except json.JSONDecodeError: # Server is having a bad day and crashed.
raise self.BadResponseError(
'Json not returned with status code [' + str(response.status_code) + ']')
if response.status_code == 400:
return output
if response.status_code not in [200, 201]: # Safety catch.
raise self.BadResponseError(
str(output) + ': with status code [' + str(response.status_code) +
'] and params:' + str(output))
return output['data']
def get(self, url: str) -> List[dict]:
""" Requests data from database """
response = requests.get(
url,
headers = {'Content-type': 'application/json'},
auth = ('scicrunch', 'perl22(query)') # for test2.scicrunch.org
)
output = self.process_response(response)
return output
def post(self, url: str, data: List[dict]) -> List[dict]:
""" Gives data to database """
data.update({
'key': self.api_key,
})
response = requests.post(
url,
data = json.dumps(data),
headers = {'Content-type': 'application/json'},
auth = ('scicrunch', 'perl22(query)') # for test2.scicrunch.org
)
output = self.process_response(response)
return output
def fix_ilx(self, ilx_id: str) -> str:
""" Database only excepts lower case and underscore version of ID """
# FIXME probably .rsplit('/', 1) is the more correct version of this
# and because this is nominally a 'private' interface these should be
ilx_id = ilx_id.replace('http://uri.interlex.org/base/', '')
if ilx_id[:4] not in ['TMP:', 'tmp_', 'ILX:', 'ilx_']:
raise ValueError(
'Need to provide ilx ID with format ilx_# or ILX:# for given ID ' + ilx_id)
return ilx_id.replace('ILX:', 'ilx_').replace('TMP:', 'tmp_')
def process_superclass(self, entity: List[dict]) -> List[dict]:
""" Replaces ILX ID with superclass ID """
superclass = entity.pop('superclass')
label = entity['label']
if not superclass.get('ilx_id'):
raise self.SuperClassDoesNotExistError(
f'Superclass not given an interlex ID for label: {label}')
superclass_data = self.get_entity(superclass['ilx_id'])
if not superclass_data['id']:
raise self.SuperClassDoesNotExistError(
'Superclass ILX ID: ' + superclass['ilx_id'] + ' does not exist in SciCrunch')
# BUG: only excepts superclass_tid
entity['superclasses'] = [{'superclass_tid': superclass_data['id']}]
return entity
def process_synonyms(self, entity: List[dict]) -> List[dict]:
""" Making sure key/value is in proper format for synonyms in entity """
label = entity['label']
for synonym in entity['synonyms']:
# these are internal errors and users should never see them
if 'literal' not in synonym:
raise ValueError(f'Synonym not given a literal for label: {label}')
elif len(synonym) > 1:
raise ValueError(f'Too many keys in synonym for label: {label}')
return entity
def crude_search_scicrunch_via_label(self, label:str) -> dict:
""" Server returns anything that is simlar in any catagory """
url = self.base_url + 'term/search/{term}?key={api_key}'.format(
term = label,
api_key = self.api_key,
)
return self.get(url)
def check_scicrunch_for_label(self, label: str) -> dict:
""" Sees if label with your user ID already exists
There are can be multiples of the same label in interlex, but there should only be one
label with your user id. Therefore you can create labels if there already techniqually
exist, but not if you are the one to create it.
"""
list_of_crude_matches = self.crude_search_scicrunch_via_label(label)
for crude_match in list_of_crude_matches:
# If labels match
if crude_match['label'].lower().strip() == label.lower().strip():
complete_data_of_crude_match = self.get_entity(crude_match['ilx'])
crude_match_label = crude_match['label']
crude_match_user_id = complete_data_of_crude_match['uid']
# If label was created by you
if str(self.user_id) == str(crude_match_user_id):
return complete_data_of_crude_match # You created the entity already
# No label AND user id match
return {}
def get_entity(self, ilx_id: str) -> dict:
""" Gets full meta data (expect their annotations and relationships) from is ILX ID """
ilx_id = self.fix_ilx(ilx_id)
url = self.base_url + "ilx/search/identifier/{identifier}?key={api_key}".format(
identifier = ilx_id,
api_key = self.api_key,
)
return self.get(url)
def add_entity(
self,
label: str,
type: str,
definition: str = None,
comment: str = None,
superclass: str = None,
synonyms: list = None) -> dict:
template_entity_input = {k:v for k, v in locals().items() if k != 'self' and v}
if template_entity_input.get('superclass'):
template_entity_input['superclass'] = self.fix_ilx(template_entity_input['superclass'])
if not label:
raise self.NoLabelError('Entity needs a label')
if not type:
raise self.NoTypeError('Entity needs a type')
entity_input = {
'label': label,
'type': type,
}
if definition:
entity_input['definition'] = definition
if comment:
entity_input['comment'] = comment
if superclass:
entity_input['superclass'] = {'ilx_id':self.fix_ilx(superclass)}
if synonyms:
entity_input['synonyms'] = [{'literal': syn} for syn in synonyms]
raw_entity_outout = self.add_raw_entity(entity_input)
# Sanity check -> output same as input, but filled with response data
entity_output = {}
ics = [(e['iri'], e['curie'])
for e in raw_entity_outout['existing_ids']]
entity_output['iri'], entity_output['curie'] = sorted((i, c)
for i, c in ics
if 'ilx_' in i)[0]
### FOR NEW BETA. Old can have 'ilx_' in the ids ###
if 'tmp' in raw_entity_outout['ilx']:
_id = raw_entity_outout['ilx'].split('_')[-1]
entity_output['iri'] = 'http://uri.interlex.org/base/tmp_' + _id
entity_output['curie'] = 'TMP:' + _id
for key, value in template_entity_input.items():
if key == 'superclass':
entity_output[key] = raw_entity_outout['superclasses'][0]['ilx']
elif key == 'synonyms':
entity_output[key] = [syn['literal']
for syn in raw_entity_outout['synonyms']]
else:
entity_output[key] = str(raw_entity_outout[key])
# skip this for now, I check it downstream be cause I'm paranoid, but in this client it is
# safe to assume that the value given will be the value returned if there is a return at all
# it also isn't that they match exactly, because some new values (e.g. iri and curie) are expected
#if entity_output != template_entity_input:
# DEBUG: helps see what's wrong; might want to make a clean version of this
# for key, value in template_entity_input.items():
# if template_entity_input[key] != entity_output[key]:
# print(template_entity_input[key], entity_output[key])
#raise self.BadResponseError('The server did not return proper data!')
if entity_output.get('superclass'):
entity_output['superclass'] = self.ilx_base_url + entity_output['superclass']
entity_output['ilx'] = self.ilx_base_url + raw_entity_outout['ilx']
return entity_output
def add_raw_entity(self, entity: dict) -> dict:
""" Adds entity if it does not already exist under your user ID.
Need to provide a list of dictionaries that have at least the key/values
for label and type. If given a key, the values provided must be in the
format shown in order for the server to except them. You can input
multiple synonyms, or existing_ids.
Entity type can be any of the following: term, pde, fde, cde, annotation, or relationship
Options Template:
entity = {
'label': '',
'type': '',
'definition': '',
'comment': '',
'superclass': {
'ilx_id': ''
},
'synonyms': [
{
'literal': ''
},
],
'existing_ids': [
{
'iri': '',
'curie': '',
},
],
}
Minimum Needed:
entity = {
'label': '',
'type': '',
}
Example:
entity = {
'label': 'brain',
'type': 'pde',
'definition': 'Part of the central nervous system',
'comment': 'Cannot live without it',
'superclass': {
'ilx_id': 'ilx_0108124', # ILX ID for Organ
},
'synonyms': [
{
'literal': 'Encephalon'
},
{
'literal': 'Cerebro'
},
],
'existing_ids': [
{
'iri': 'http://uri.neuinfo.org/nif/nifstd/birnlex_796',
'curie': 'BIRNLEX:796',
},
],
}
"""
needed_in_entity = set([
'label',
'type',
])
options_in_entity = set([
'label',
'type',
'definition',
'comment',
'superclass',
'synonyms',
'existing_ids'
])
prime_entity_url = self.base_url + 'ilx/add'
add_entity_url = self.base_url + 'term/add'
### Checking if key/value format is correct ###
# Seeing if you are missing a needed key
if (set(entity) & needed_in_entity) != needed_in_entity:
raise self.MissingKeyError(
'You need key(s): '+ str(needed_in_entity - set(entity)))
# Seeing if you have other options not included in the description
elif (set(entity) | options_in_entity) != options_in_entity:
raise self.IncorrectKeyError(
'Unexpected key(s): ' + str(set(entity) - options_in_entity))
entity['type'] = entity['type'].lower() # BUG: server only takes lowercase
if entity['type'] not in ['term', 'relationship', 'annotation', 'cde', 'fde', 'pde']:
raise TypeError(
'Entity should be one of the following: ' +
'term, relationship, annotation, cde, fde, pde')
if entity.get('superclass'):
entity = self.process_superclass(entity)
if entity.get('synonyms'):
entity = self.process_synonyms(entity)
if entity.get('existing_ids'):
entity = self.process_existing_ids(entity)
entity['uid'] = self.user_id # BUG: php lacks uid update
### Adding entity to SciCrunch ###
entity['term'] = entity.pop('label') # ilx/add nuance
ilx_data = self.post(
url = prime_entity_url,
data = entity.copy(),
) # requesting spot in server for entity
if ilx_data.get('ilx'):
ilx_id = ilx_data['ilx']
else:
ilx_id = ilx_data['fragment'] # beta.scicrunch.org
entity['label'] = entity.pop('term') # term/add nuance
entity['ilx'] = ilx_id # need entity ilx_id to place entity in db
output = self.post(
url = add_entity_url,
data = entity.copy(),
) # data represented in SciCrunch interface
### Checking if label already exisits ###
if output.get('errormsg'):
if 'already exists' in output['errormsg'].lower():
prexisting_data = self.check_scicrunch_for_label(entity['label'])
if prexisting_data:
print(
'You already added entity', entity['label'],
'with ILX ID:', prexisting_data['ilx'])
return prexisting_data
self.Error(output) # FIXME what is the correct error here?
self.Error(output) # FIXME what is the correct error here?
# BUG: server output incomplete compared to search via ilx ids
output = self.get_entity(output['ilx'])
return output
def update_entity(
self,
ilx_id: str,
label: str = None,
type: str = None,
definition: str = None,
comment: str = None,
superclass: str = None,
synonyms: list = None) -> dict:
""" Updates pre-existing entity as long as the api_key is from the account that created it
Args:
label: name of entity
type: entities type
Can be any of the following: term, cde, fde, pde, annotation, relationship
definition: entities definition
comment: a foot note regarding either the interpretation of the data or the data itself
superclass: entity is a sub-part of this entity
Example: Organ is a superclass to Brain
synonyms: entity synonyms
Returns:
Server response that is a nested dictionary format
"""
template_entity_input = {k:v for k, v in locals().copy().items() if k != 'self'}
if template_entity_input.get('superclass'):
template_entity_input['superclass'] = self.fix_ilx(template_entity_input['superclass'])
existing_entity = self.get_entity(ilx_id=ilx_id)
if not existing_entity['id']: # TODO: Need to make a proper ilx_id check error
raise self.EntityDoesNotExistError(
f'ilx_id provided {ilx_id} does not exist')
update_url = self.base_url + 'term/edit/{id}'.format(id=existing_entity['id'])
if label:
existing_entity['label'] = label
if type:
existing_entity['type'] = type
if definition:
existing_entity['definition'] = definition
if comment:
existing_entity['comment'] = comment
if superclass:
existing_entity['superclass'] = {'ilx_id': superclass}
existing_entity = self.process_superclass(existing_entity)
# If a match use old data, else append new synonym
if synonyms:
if existing_entity['synonyms']:
new_existing_synonyms = []
existing_synonyms = {syn['literal'].lower():syn for syn in existing_entity['synonyms']}
for synonym in synonyms:
existing_synonym = existing_synonyms.get(synonym.lower())
if not existing_synonym:
new_existing_synonyms.append({'literal': synonym})
else:
new_existing_synonyms.append(existing_synonym)
existing_entity['synonyms'] = new_existing_synonyms
# Just in case I need this...
# if synonyms_to_delete:
# if existing_entity['synonyms']:
# remaining_existing_synonyms = []
# existing_synonyms = {syn['literal'].lower():syn for syn in existing_entity['synonyms']}
# for synonym in synonyms:
# if existing_synonyms.get(synonym.lower()):
# existing_synonyms.pop(synonym.lower())
# else:
# print('WARNING: synonym you wanted to delete', synonym, 'does not exist')
# existing_entity['synonyms'] = list(existing_synonyms.values())
response = self.post(
url = update_url,
data = existing_entity,
)
# BUG: server response is bad and needs to actually search again to get proper format
raw_entity_outout = self.get_entity(response['ilx'])
entity_output = {}
ics = [(e['iri'], e['curie'])
for e in raw_entity_outout['existing_ids']]
entity_output['iri'], entity_output['curie'] = sorted((i, c)
for i, c in ics
if 'ilx_' in i)[0]
### FOR NEW BETA. Old can have 'ilx_' in the ids ###
if 'tmp' in raw_entity_outout['ilx']:
_id = raw_entity_outout['ilx'].split('_')[-1]
entity_output['iri'] = 'http://uri.interlex.org/base/tmp_' + _id
entity_output['curie'] = 'TMP:' + _id
print(template_entity_input)
for key, value in template_entity_input.items():
if key == 'superclass':
if raw_entity_outout.get('superclasses'):
entity_output[key] = raw_entity_outout['superclasses'][0]['ilx']
elif key == 'synonyms':
entity_output[key] = [syn['literal']
for syn in raw_entity_outout['synonyms']]
elif key == 'ilx_id':
pass
else:
entity_output[key] = str(raw_entity_outout[key])
if entity_output.get('superclass'):
entity_output['superclass'] = self.ilx_base_url + entity_output['superclass']
entity_output['ilx'] = self.ilx_base_url + raw_entity_outout['ilx']
return entity_output
def get_annotation_via_tid(self, tid: str) -> dict:
""" Gets annotation via anchored entity id """
url = self.base_url + 'term/get-annotations/{tid}?key={api_key}'.format(
tid = tid,
api_key = self.api_key,
)
return self.get(url)
def add_annotation(
self,
term_ilx_id: str,
annotation_type_ilx_id: str,
annotation_value: str) -> dict:
""" Adding an annotation value to a prexisting entity
An annotation exists as 3 different parts:
1. entity with type term, cde, fde, or pde
2. entity with type annotation
3. string value of the annotation
Example:
annotation = {
'term_ilx_id': 'ilx_0101431', # brain ILX ID
'annotation_type_ilx_id': 'ilx_0381360', # hasDbXref ILX ID
'annotation_value': 'http://neurolex.org/wiki/birnlex_796',
}
"""
url = self.base_url + 'term/add-annotation'
term_data = self.get_entity(term_ilx_id)
if not term_data['id']:
exit(
'term_ilx_id: ' + term_ilx_id + ' does not exist'
)
anno_data = self.get_entity(annotation_type_ilx_id)
if not anno_data['id']:
exit(
'annotation_type_ilx_id: ' + annotation_type_ilx_id +
' does not exist'
)
data = {
'tid': term_data['id'],
'annotation_tid': anno_data['id'],
'value': annotation_value,
'term_version': term_data['version'],
'annotation_term_version': anno_data['version'],
'orig_uid': self.user_id, # BUG: php lacks orig_uid update
}
output = self.post(
url = url,
data = data,
)
### If already exists, we return the actual annotation properly ###
if output.get('errormsg'):
if 'already exists' in output['errormsg'].lower():
term_annotations = self.get_annotation_via_tid(term_data['id'])
for term_annotation in term_annotations:
if str(term_annotation['annotation_tid']) == str(anno_data['id']):
if term_annotation['value'] == data['value']:
print(
'Annotation: [' + term_data['label'] + ' -> ' + anno_data['label'] +
' -> ' + data['value'] + '], already exists.'
)
return term_annotation
exit(output)
exit(output)
return output
def delete_annotation(
self,
term_ilx_id: str,
annotation_type_ilx_id: str,
annotation_value: str) -> dict:
""" If annotation doesnt exist, add it
"""
term_data = self.get_entity(term_ilx_id)
if not term_data['id']:
exit(
'term_ilx_id: ' + term_ilx_id + ' does not exist'
)
anno_data = self.get_entity(annotation_type_ilx_id)
if not anno_data['id']:
exit(
'annotation_type_ilx_id: ' + annotation_type_ilx_id +
' does not exist'
)
entity_annotations = self.get_annotation_via_tid(term_data['id'])
annotation_id = ''
for annotation in entity_annotations:
if str(annotation['tid']) == str(term_data['id']):
if str(annotation['annotation_tid']) == str(anno_data['id']):
if str(annotation['value']) == str(annotation_value):
annotation_id = annotation['id']
break
if not annotation_id:
print('''WARNING: Annotation you wanted to delete does not exist ''')
return None
url = self.base_url + 'term/edit-annotation/{annotation_id}'.format(
annotation_id = annotation_id
)
data = {
'tid': ' ', # for delete
'annotation_tid': ' ', # for delete
'value': ' ', # for delete
'term_version': ' ',
'annotation_term_version': ' ',
}
output = self.post(
url = url,
data = data,
)
# check output
return output
def get_relationship_via_tid(self, tid: str) -> dict:
url = self.base_url + 'term/get-relationships/{tid}?key={api_key}'.format(
tid = tid,
api_key = self.api_key,
)
return self.get(url)
def add_relationship(
self,
entity1_ilx: str,
relationship_ilx: str,
entity2_ilx: str) -> dict:
""" Adds relationship connection in Interlex
A relationship exists as 3 different parts:
1. entity with type term, cde, fde, or pde
2. entity with type relationship that connects entity1 to entity2
-> Has its' own meta data, so no value needed
3. entity with type term, cde, fde, or pde
"""
url = self.base_url + 'term/add-relationship'
entity1_data = self.get_entity(entity1_ilx)
if not entity1_data['id']:
exit(
'entity1_ilx: ' + entity1_ilx + ' does not exist'
)
relationship_data = self.get_entity(relationship_ilx)
if not relationship_data['id']:
exit(
'relationship_ilx: ' + relationship_ilx + ' does not exist'
)
entity2_data = self.get_entity(entity2_ilx)
if not entity2_data['id']:
exit(
'entity2_ilx: ' + entity2_ilx + ' does not exist'
)
data = {
'term1_id': entity1_data['id'],
'relationship_tid': relationship_data['id'],
'term2_id': entity2_data['id'],
'term1_version': entity1_data['version'],
'term2_version': entity2_data['version'],
'relationship_term_version': relationship_data['version'],
'orig_uid': self.user_id, # BUG: php lacks orig_uid update
}
output = self.post(
url = url,
data = data,
)
### If already exists, we return the actual relationship properly ###
if output.get('errormsg'):
if 'already exists' in output['errormsg'].lower():
term_relationships = self.get_relationship_via_tid(entity1_data['id'])
for term_relationship in term_relationships:
if str(term_relationship['term2_id']) == str(entity2_data['id']):
if term_relationship['relationship_tid'] == relationship_data['id']:
print(
'relationship: [' + entity1_data['label'] + ' -> ' +
relationship_data['label'] + ' -> ' + entity2_data['label'] +
'], already exists.'
)
return term_relationship
exit(output)
exit(output)
return output
def delete_relationship(
self,
entity1_ilx: str,
relationship_ilx: str,
entity2_ilx: str) -> dict:
""" Adds relationship connection in Interlex
A relationship exists as 3 different parts:
1. entity with type term, cde, fde, or pde
2. entity with type relationship that connects entity1 to entity2
-> Has its' own meta data, so no value needed
3. entity with type term, cde, fde, or pde
"""
entity1_data = self.get_entity(entity1_ilx)
if not entity1_data['id']:
exit(
'entity1_ilx: ' + entity1_data + ' does not exist'
)
relationship_data = self.get_entity(relationship_ilx)
if not relationship_data['id']:
exit(
'relationship_ilx: ' + relationship_ilx + ' does not exist'
)
entity2_data = self.get_entity(entity2_ilx)
if not entity2_data['id']:
exit(
'entity2_ilx: ' + entity2_data + ' does not exist'
)
data = {
'term1_id': ' ', #entity1_data['id'],
'relationship_tid': ' ', #relationship_data['id'],
'term2_id': ' ',#entity2_data['id'],
'term1_version': entity1_data['version'],
'term2_version': entity2_data['version'],
'relationship_term_version': relationship_data['version'],
'orig_uid': self.user_id, # BUG: php lacks orig_uid update
}
entity_relationships = self.get_relationship_via_tid(entity1_data['id'])
# TODO: parse through entity_relationships to see if we have a match; else print warning and return None
relationship_id = None
for relationship in entity_relationships:
if str(relationship['term1_id']) == str(entity1_data['id']):
if str(relationship['term2_id']) == str(entity2_data['id']):
if str(relationship['relationship_tid']) == str(relationship_data['id']):
relationship_id = relationship['id']
break
if not relationship_id:
print('''WARNING: Annotation you wanted to delete does not exist ''')
return None
url = self.base_url + 'term/edit-relationship/{id}'.format(id=relationship_id)
output = self.post(
url = url,
data = data,
)
return output
|
tgbugs/ontquery | ontquery/plugins/interlex_client.py | InterLexClient.crude_search_scicrunch_via_label | python | def crude_search_scicrunch_via_label(self, label:str) -> dict:
url = self.base_url + 'term/search/{term}?key={api_key}'.format(
term = label,
api_key = self.api_key,
)
return self.get(url) | Server returns anything that is simlar in any catagory | train | https://github.com/tgbugs/ontquery/blob/bcf4863cb2bf221afe2b093c5dc7da1377300041/ontquery/plugins/interlex_client.py#L154-L160 | [
"def get(self, url: str) -> List[dict]:\n \"\"\" Requests data from database \"\"\"\n response = requests.get(\n url,\n headers = {'Content-type': 'application/json'},\n auth = ('scicrunch', 'perl22(query)') # for test2.scicrunch.org\n )\n output = self.process_response(response)\n return output\n"
] | class InterLexClient:
""" Connects to SciCrunch via its' api endpoints
Purpose is to allow external curators to add entities and annotations to those entities.
Functions To Use:
add_entity
add_annotation
Notes On Time Complexity:
Function add_entity, if added an entity successfully, will hit a least 5 endpoints. This
may cause it to take a few seconds to load each entity into SciCrunch. Function
add_annotation is a little more forgiving with it only hitting 3 minimum.
"""
class Error(Exception): pass
class SuperClassDoesNotExistError(Error):
""" The superclass listed does not exist! """
class EntityDoesNotExistError(Error):
""" The entity listed does not exist! """
class BadResponseError(Error): pass
class NoLabelError(Error): pass
class NoTypeError(Error): pass
class MissingKeyError(Error): pass
class IncorrectKeyError(Error): pass
class IncorrectAPIKeyError(Error): pass
default_base_url = 'https://scicrunch.org/api/1/'
ilx_base_url = 'http://uri.interlex.org/base/'
def __init__(self, api_key: str, base_url: str = default_base_url):
self.api_key = api_key
self.base_url = base_url
user_info_url = self.default_base_url + 'user/info?key=' + self.api_key
self.check_api_key(user_info_url)
self.user_id = str(self.get(user_info_url)['id'])
def check_api_key(self, url):
response = requests.get(
url,
headers = {'Content-type': 'application/json'},
auth = ('scicrunch', 'perl22(query)') # for test2.scicrunch.org
)
if response.status_code not in [200, 201]: # Safety catch.
raise self.IncorrectAPIKeyError('api_key given is incorrect.')
def process_response(self, response: requests.models.Response) -> dict:
""" Checks for correct data response and status codes """
try:
output = response.json()
except json.JSONDecodeError: # Server is having a bad day and crashed.
raise self.BadResponseError(
'Json not returned with status code [' + str(response.status_code) + ']')
if response.status_code == 400:
return output
if response.status_code not in [200, 201]: # Safety catch.
raise self.BadResponseError(
str(output) + ': with status code [' + str(response.status_code) +
'] and params:' + str(output))
return output['data']
def get(self, url: str) -> List[dict]:
""" Requests data from database """
response = requests.get(
url,
headers = {'Content-type': 'application/json'},
auth = ('scicrunch', 'perl22(query)') # for test2.scicrunch.org
)
output = self.process_response(response)
return output
def post(self, url: str, data: List[dict]) -> List[dict]:
""" Gives data to database """
data.update({
'key': self.api_key,
})
response = requests.post(
url,
data = json.dumps(data),
headers = {'Content-type': 'application/json'},
auth = ('scicrunch', 'perl22(query)') # for test2.scicrunch.org
)
output = self.process_response(response)
return output
def fix_ilx(self, ilx_id: str) -> str:
""" Database only excepts lower case and underscore version of ID """
# FIXME probably .rsplit('/', 1) is the more correct version of this
# and because this is nominally a 'private' interface these should be
ilx_id = ilx_id.replace('http://uri.interlex.org/base/', '')
if ilx_id[:4] not in ['TMP:', 'tmp_', 'ILX:', 'ilx_']:
raise ValueError(
'Need to provide ilx ID with format ilx_# or ILX:# for given ID ' + ilx_id)
return ilx_id.replace('ILX:', 'ilx_').replace('TMP:', 'tmp_')
def process_superclass(self, entity: List[dict]) -> List[dict]:
""" Replaces ILX ID with superclass ID """
superclass = entity.pop('superclass')
label = entity['label']
if not superclass.get('ilx_id'):
raise self.SuperClassDoesNotExistError(
f'Superclass not given an interlex ID for label: {label}')
superclass_data = self.get_entity(superclass['ilx_id'])
if not superclass_data['id']:
raise self.SuperClassDoesNotExistError(
'Superclass ILX ID: ' + superclass['ilx_id'] + ' does not exist in SciCrunch')
# BUG: only excepts superclass_tid
entity['superclasses'] = [{'superclass_tid': superclass_data['id']}]
return entity
def process_synonyms(self, entity: List[dict]) -> List[dict]:
""" Making sure key/value is in proper format for synonyms in entity """
label = entity['label']
for synonym in entity['synonyms']:
# these are internal errors and users should never see them
if 'literal' not in synonym:
raise ValueError(f'Synonym not given a literal for label: {label}')
elif len(synonym) > 1:
raise ValueError(f'Too many keys in synonym for label: {label}')
return entity
def process_existing_ids(self, entity: List[dict]) -> List[dict]:
""" Making sure key/value is in proper format for existing_ids in entity """
label = entity['label']
existing_ids = entity['existing_ids']
for existing_id in existing_ids:
if 'curie' not in existing_id or 'iri' not in existing_id:
raise ValueError(
f'Missing needing key(s) in existing_ids for label: {label}')
elif len(existing_id) > 2:
raise ValueError(
f'Extra keys not recognized in existing_ids for label: {label}')
return entity
def check_scicrunch_for_label(self, label: str) -> dict:
""" Sees if label with your user ID already exists
There are can be multiples of the same label in interlex, but there should only be one
label with your user id. Therefore you can create labels if there already techniqually
exist, but not if you are the one to create it.
"""
list_of_crude_matches = self.crude_search_scicrunch_via_label(label)
for crude_match in list_of_crude_matches:
# If labels match
if crude_match['label'].lower().strip() == label.lower().strip():
complete_data_of_crude_match = self.get_entity(crude_match['ilx'])
crude_match_label = crude_match['label']
crude_match_user_id = complete_data_of_crude_match['uid']
# If label was created by you
if str(self.user_id) == str(crude_match_user_id):
return complete_data_of_crude_match # You created the entity already
# No label AND user id match
return {}
def get_entity(self, ilx_id: str) -> dict:
""" Gets full meta data (expect their annotations and relationships) from is ILX ID """
ilx_id = self.fix_ilx(ilx_id)
url = self.base_url + "ilx/search/identifier/{identifier}?key={api_key}".format(
identifier = ilx_id,
api_key = self.api_key,
)
return self.get(url)
def add_entity(
        self,
        label: str,
        type: str,
        definition: str = None,
        comment: str = None,
        superclass: str = None,
        synonyms: list = None) -> dict:
    """Add a new entity to InterLex and return a flattened summary dict.

    Convenience wrapper around add_raw_entity: builds the raw payload,
    submits it, then reshapes the server response into a flat dict with
    full iri/curie/ilx URLs.

    :param label: name of the entity (required).
    :param type: term, cde, fde, pde, annotation, or relationship.
    :param definition: entity definition.
    :param comment: footnote about the data or its interpretation.
    :param superclass: ILX id of the parent entity.
    :param synonyms: list of synonym strings.
    :raises NoLabelError: if label is falsy.
    :raises NoTypeError: if type is falsy.
    """
    # Snapshot of the non-empty inputs; mirrored back out of the server
    # response below so callers get back the fields they sent in.
    template_entity_input = {k:v for k, v in locals().items() if k != 'self' and v}
    if template_entity_input.get('superclass'):
        template_entity_input['superclass'] = self.fix_ilx(template_entity_input['superclass'])
    if not label:
        raise self.NoLabelError('Entity needs a label')
    if not type:
        raise self.NoTypeError('Entity needs a type')
    # Raw payload in the nested shape add_raw_entity expects.
    entity_input = {
        'label': label,
        'type': type,
    }
    if definition:
        entity_input['definition'] = definition
    if comment:
        entity_input['comment'] = comment
    if superclass:
        entity_input['superclass'] = {'ilx_id':self.fix_ilx(superclass)}
    if synonyms:
        entity_input['synonyms'] = [{'literal': syn} for syn in synonyms]
    raw_entity_outout = self.add_raw_entity(entity_input)
    # Sanity check -> output same as input, but filled with response data
    entity_output = {}
    # Pick the interlex ('ilx_') pair among the existing ids as the
    # canonical iri/curie for the new entity.
    ics = [(e['iri'], e['curie'])
           for e in raw_entity_outout['existing_ids']]
    entity_output['iri'], entity_output['curie'] = sorted((i, c)
                                                          for i, c in ics
                                                          if 'ilx_' in i)[0]
    ### FOR NEW BETA. Old can have 'ilx_' in the ids ###
    if 'tmp' in raw_entity_outout['ilx']:
        _id = raw_entity_outout['ilx'].split('_')[-1]
        entity_output['iri'] = 'http://uri.interlex.org/base/tmp_' + _id
        entity_output['curie'] = 'TMP:' + _id
    # Mirror each requested field back from the server's representation.
    for key, value in template_entity_input.items():
        if key == 'superclass':
            entity_output[key] = raw_entity_outout['superclasses'][0]['ilx']
        elif key == 'synonyms':
            entity_output[key] = [syn['literal']
                                  for syn in raw_entity_outout['synonyms']]
        else:
            entity_output[key] = str(raw_entity_outout[key])
    # skip this for now, I check it downstream be cause I'm paranoid, but in this client it is
    # safe to assume that the value given will be the value returned if there is a return at all
    # it also isn't that they match exactly, because some new values (e.g. iri and curie) are expected
    #if entity_output != template_entity_input:
        # DEBUG: helps see what's wrong; might want to make a clean version of this
        # for key, value in template_entity_input.items():
        #     if template_entity_input[key] != entity_output[key]:
        #         print(template_entity_input[key], entity_output[key])
        #raise self.BadResponseError('The server did not return proper data!')
    if entity_output.get('superclass'):
        entity_output['superclass'] = self.ilx_base_url + entity_output['superclass']
    entity_output['ilx'] = self.ilx_base_url + raw_entity_outout['ilx']
    return entity_output
def add_raw_entity(self, entity: dict) -> dict:
    """ Adds entity if it does not already exist under your user ID.

    Need to provide a dictionary that has at least the key/values
    for label and type. If given a key, the values provided must be in the
    format shown in order for the server to accept them. You can input
    multiple synonyms, or existing_ids.

    Entity type can be any of the following: term, pde, fde, cde, annotation, or relationship

    Options Template:
        entity = {
            'label': '',
            'type': '',
            'definition': '',
            'comment': '',
            'superclass': {
                'ilx_id': ''
            },
            'synonyms': [
                {
                    'literal': ''
                },
            ],
            'existing_ids': [
                {
                    'iri': '',
                    'curie': '',
                },
            ],
        }

    Minimum Needed:
        entity = {
            'label': '',
            'type': '',
        }

    Example:
        entity = {
            'label': 'brain',
            'type': 'pde',
            'definition': 'Part of the central nervous system',
            'comment': 'Cannot live without it',
            'superclass': {
                'ilx_id': 'ilx_0108124', # ILX ID for Organ
            },
            'synonyms': [
                {
                    'literal': 'Encephalon'
                },
                {
                    'literal': 'Cerebro'
                },
            ],
            'existing_ids': [
                {
                    'iri': 'http://uri.neuinfo.org/nif/nifstd/birnlex_796',
                    'curie': 'BIRNLEX:796',
                },
            ],
        }
    """
    needed_in_entity = set([
        'label',
        'type',
    ])
    options_in_entity = set([
        'label',
        'type',
        'definition',
        'comment',
        'superclass',
        'synonyms',
        'existing_ids'
    ])
    prime_entity_url = self.base_url + 'ilx/add'
    add_entity_url = self.base_url + 'term/add'
    ### Checking if key/value format is correct ###
    # Seeing if you are missing a needed key
    if (set(entity) & needed_in_entity) != needed_in_entity:
        raise self.MissingKeyError(
            'You need key(s): '+ str(needed_in_entity - set(entity)))
    # Seeing if you have other options not included in the description
    elif (set(entity) | options_in_entity) != options_in_entity:
        raise self.IncorrectKeyError(
            'Unexpected key(s): ' + str(set(entity) - options_in_entity))
    entity['type'] = entity['type'].lower() # BUG: server only takes lowercase
    if entity['type'] not in ['term', 'relationship', 'annotation', 'cde', 'fde', 'pde']:
        raise TypeError(
            'Entity should be one of the following: ' +
            'term, relationship, annotation, cde, fde, pde')
    # Normalize nested structures into the exact shapes the server accepts.
    if entity.get('superclass'):
        entity = self.process_superclass(entity)
    if entity.get('synonyms'):
        entity = self.process_synonyms(entity)
    if entity.get('existing_ids'):
        entity = self.process_existing_ids(entity)
    entity['uid'] = self.user_id # BUG: php lacks uid update
    ### Adding entity to SciCrunch ###
    entity['term'] = entity.pop('label') # ilx/add nuance
    ilx_data = self.post(
        url = prime_entity_url,
        data = entity.copy(),
    ) # requesting spot in server for entity
    if ilx_data.get('ilx'):
        ilx_id = ilx_data['ilx']
    else:
        ilx_id = ilx_data['fragment'] # beta.scicrunch.org
    entity['label'] = entity.pop('term') # term/add nuance
    entity['ilx'] = ilx_id # need entity ilx_id to place entity in db
    output = self.post(
        url = add_entity_url,
        data = entity.copy(),
    ) # data represented in SciCrunch interface
    ### Checking if label already exists ###
    if output.get('errormsg'):
        if 'already exists' in output['errormsg'].lower():
            prexisting_data = self.check_scicrunch_for_label(entity['label'])
            if prexisting_data:
                print(
                    'You already added entity', entity['label'],
                    'with ILX ID:', prexisting_data['ilx'])
                return prexisting_data
        # BUG FIX: the original built `self.Error(output)` (twice) without
        # raising it, so server errors were silently ignored and the
        # get_entity call below failed with an unrelated KeyError.
        raise self.Error(output)  # FIXME what is the correct error here?
    # BUG: server output incomplete compared to search via ilx ids
    output = self.get_entity(output['ilx'])
    return output
def update_entity(
        self,
        ilx_id: str,
        label: str = None,
        type: str = None,
        definition: str = None,
        comment: str = None,
        superclass: str = None,
        synonyms: list = None) -> dict:
    """ Updates pre-existing entity as long as the api_key is from the account that created it

    Args:
        label: name of entity
        type: entities type
            Can be any of the following: term, cde, fde, pde, annotation, relationship
        definition: entities definition
        comment: a foot note regarding either the interpretation of the data or the data itself
        superclass: entity is a sub-part of this entity
            Example: Organ is a superclass to Brain
        synonyms: entity synonyms

    Returns:
        Server response that is a nested dictionary format
    """
    # Snapshot of the requested changes; mirrored back out of the server
    # response at the end.
    template_entity_input = {k:v for k, v in locals().copy().items() if k != 'self'}
    if template_entity_input.get('superclass'):
        template_entity_input['superclass'] = self.fix_ilx(template_entity_input['superclass'])
    existing_entity = self.get_entity(ilx_id=ilx_id)
    if not existing_entity['id']: # TODO: Need to make a proper ilx_id check error
        raise self.EntityDoesNotExistError(
            f'ilx_id provided {ilx_id} does not exist')
    update_url = self.base_url + 'term/edit/{id}'.format(id=existing_entity['id'])
    # Overlay only the fields the caller actually supplied.
    if label:
        existing_entity['label'] = label
    if type:
        existing_entity['type'] = type
    if definition:
        existing_entity['definition'] = definition
    if comment:
        existing_entity['comment'] = comment
    if superclass:
        existing_entity['superclass'] = {'ilx_id': superclass}
        existing_entity = self.process_superclass(existing_entity)
    if synonyms:
        if existing_entity['synonyms']:
            # If a synonym already exists (case-insensitive), reuse the
            # server's record; otherwise append it as a new literal.
            new_existing_synonyms = []
            existing_synonyms = {syn['literal'].lower():syn for syn in existing_entity['synonyms']}
            for synonym in synonyms:
                existing_synonym = existing_synonyms.get(synonym.lower())
                if not existing_synonym:
                    new_existing_synonyms.append({'literal': synonym})
                else:
                    new_existing_synonyms.append(existing_synonym)
            existing_entity['synonyms'] = new_existing_synonyms
        else:
            # BUG FIX: synonyms given for an entity that had none were
            # previously dropped silently.
            existing_entity['synonyms'] = [{'literal': syn} for syn in synonyms]
    response = self.post(
        url = update_url,
        data = existing_entity,
    )
    # BUG: server response is bad and needs to actually search again to get proper format
    raw_entity_outout = self.get_entity(response['ilx'])
    entity_output = {}
    ics = [(e['iri'], e['curie'])
           for e in raw_entity_outout['existing_ids']]
    entity_output['iri'], entity_output['curie'] = sorted((i, c)
                                                          for i, c in ics
                                                          if 'ilx_' in i)[0]
    ### FOR NEW BETA. Old can have 'ilx_' in the ids ###
    if 'tmp' in raw_entity_outout['ilx']:
        _id = raw_entity_outout['ilx'].split('_')[-1]
        entity_output['iri'] = 'http://uri.interlex.org/base/tmp_' + _id
        entity_output['curie'] = 'TMP:' + _id
    # NOTE: leftover debug `print(template_entity_input)` removed.
    for key, value in template_entity_input.items():
        if key == 'superclass':
            if raw_entity_outout.get('superclasses'):
                entity_output[key] = raw_entity_outout['superclasses'][0]['ilx']
        elif key == 'synonyms':
            entity_output[key] = [syn['literal']
                                  for syn in raw_entity_outout['synonyms']]
        elif key == 'ilx_id':
            pass
        else:
            entity_output[key] = str(raw_entity_outout[key])
    if entity_output.get('superclass'):
        entity_output['superclass'] = self.ilx_base_url + entity_output['superclass']
    entity_output['ilx'] = self.ilx_base_url + raw_entity_outout['ilx']
    return entity_output
def get_annotation_via_tid(self, tid: str) -> dict:
    """Fetch all annotations anchored to the entity with server id *tid*."""
    url = f'{self.base_url}term/get-annotations/{tid}?key={self.api_key}'
    return self.get(url)
def add_annotation(
        self,
        term_ilx_id: str,
        annotation_type_ilx_id: str,
        annotation_value: str) -> dict:
    """ Adding an annotation value to a prexisting entity

    An annotation exists as 3 different parts:
        1. entity with type term, cde, fde, or pde
        2. entity with type annotation
        3. string value of the annotation

    Example:
        annotation = {
            'term_ilx_id': 'ilx_0101431', # brain ILX ID
            'annotation_type_ilx_id': 'ilx_0381360', # hasDbXref ILX ID
            'annotation_value': 'http://neurolex.org/wiki/birnlex_796',
        }
    """
    url = self.base_url + 'term/add-annotation'
    term_data = self.get_entity(term_ilx_id)
    if not term_data['id']:
        # FIXME: exit() in library code kills the host process; should raise.
        exit(
            'term_ilx_id: ' + term_ilx_id + ' does not exist'
        )
    anno_data = self.get_entity(annotation_type_ilx_id)
    if not anno_data['id']:
        exit(
            'annotation_type_ilx_id: ' + annotation_type_ilx_id +
            ' does not exist'
        )
    data = {
        'tid': term_data['id'],
        'annotation_tid': anno_data['id'],
        'value': annotation_value,
        'term_version': term_data['version'],
        'annotation_term_version': anno_data['version'],
        'orig_uid': self.user_id, # BUG: php lacks orig_uid update
    }
    output = self.post(
        url = url,
        data = data,
    )
    ### If already exists, we return the actual annotation properly ###
    if output.get('errormsg'):
        if 'already exists' in output['errormsg'].lower():
            term_annotations = self.get_annotation_via_tid(term_data['id'])
            for term_annotation in term_annotations:
                if str(term_annotation['annotation_tid']) == str(anno_data['id']):
                    if term_annotation['value'] == data['value']:
                        print(
                            'Annotation: [' + term_data['label'] + ' -> ' + anno_data['label'] +
                            ' -> ' + data['value'] + '], already exists.'
                        )
                        return term_annotation
        # BUG FIX: the original had `exit(output)` twice in a row; the
        # second call was dead code (the first raises SystemExit).
        exit(output)
    return output
def delete_annotation(
        self,
        term_ilx_id: str,
        annotation_type_ilx_id: str,
        annotation_value: str) -> dict:
    """Delete the annotation matching (term, annotation type, value), if present.

    NOTE(review): the previous docstring read "If annotation doesnt exist,
    add it", which described the wrong operation — this method only deletes.

    :param term_ilx_id: ILX id of the annotated entity.
    :param annotation_type_ilx_id: ILX id of the annotation-type entity.
    :param annotation_value: exact string value of the annotation to remove.
    :returns: server response dict, or None when no matching annotation exists.
    """
    term_data = self.get_entity(term_ilx_id)
    if not term_data['id']:
        exit(
            'term_ilx_id: ' + term_ilx_id + ' does not exist'
        )
    anno_data = self.get_entity(annotation_type_ilx_id)
    if not anno_data['id']:
        exit(
            'annotation_type_ilx_id: ' + annotation_type_ilx_id +
            ' does not exist'
        )
    entity_annotations = self.get_annotation_via_tid(term_data['id'])
    # Locate the exact (tid, annotation_tid, value) triple to delete.
    annotation_id = ''
    for annotation in entity_annotations:
        if str(annotation['tid']) == str(term_data['id']):
            if str(annotation['annotation_tid']) == str(anno_data['id']):
                if str(annotation['value']) == str(annotation_value):
                    annotation_id = annotation['id']
                    break
    if not annotation_id:
        print('''WARNING: Annotation you wanted to delete does not exist ''')
        return None
    url = self.base_url + 'term/edit-annotation/{annotation_id}'.format(
        annotation_id = annotation_id
    )
    # Blanking the fields is how this endpoint signals deletion.
    data = {
        'tid': ' ', # for delete
        'annotation_tid': ' ', # for delete
        'value': ' ', # for delete
        'term_version': ' ',
        'annotation_term_version': ' ',
    }
    output = self.post(
        url = url,
        data = data,
    )
    # check output
    return output
def get_relationship_via_tid(self, tid: str) -> dict:
    """Fetch all relationships anchored to the entity with server id *tid*."""
    url = f'{self.base_url}term/get-relationships/{tid}?key={self.api_key}'
    return self.get(url)
def add_relationship(
        self,
        entity1_ilx: str,
        relationship_ilx: str,
        entity2_ilx: str) -> dict:
    """ Adds relationship connection in Interlex

    A relationship exists as 3 different parts:
        1. entity with type term, cde, fde, or pde
        2. entity with type relationship that connects entity1 to entity2
           -> Has its own meta data, so no value needed
        3. entity with type term, cde, fde, or pde
    """
    url = self.base_url + 'term/add-relationship'
    entity1_data = self.get_entity(entity1_ilx)
    if not entity1_data['id']:
        # FIXME: exit() in library code kills the host process; should raise.
        exit(
            'entity1_ilx: ' + entity1_ilx + ' does not exist'
        )
    relationship_data = self.get_entity(relationship_ilx)
    if not relationship_data['id']:
        exit(
            'relationship_ilx: ' + relationship_ilx + ' does not exist'
        )
    entity2_data = self.get_entity(entity2_ilx)
    if not entity2_data['id']:
        exit(
            'entity2_ilx: ' + entity2_ilx + ' does not exist'
        )
    data = {
        'term1_id': entity1_data['id'],
        'relationship_tid': relationship_data['id'],
        'term2_id': entity2_data['id'],
        'term1_version': entity1_data['version'],
        'term2_version': entity2_data['version'],
        'relationship_term_version': relationship_data['version'],
        'orig_uid': self.user_id, # BUG: php lacks orig_uid update
    }
    output = self.post(
        url = url,
        data = data,
    )
    ### If already exists, we return the actual relationship properly ###
    if output.get('errormsg'):
        if 'already exists' in output['errormsg'].lower():
            term_relationships = self.get_relationship_via_tid(entity1_data['id'])
            for term_relationship in term_relationships:
                if str(term_relationship['term2_id']) == str(entity2_data['id']):
                    if term_relationship['relationship_tid'] == relationship_data['id']:
                        print(
                            'relationship: [' + entity1_data['label'] + ' -> ' +
                            relationship_data['label'] + ' -> ' + entity2_data['label'] +
                            '], already exists.'
                        )
                        return term_relationship
        # BUG FIX: the original had `exit(output)` twice in a row; the
        # second call was dead code (the first raises SystemExit).
        exit(output)
    return output
def delete_relationship(
        self,
        entity1_ilx: str,
        relationship_ilx: str,
        entity2_ilx: str) -> dict:
    """ Deletes a relationship connection in Interlex

    A relationship exists as 3 different parts:
        1. entity with type term, cde, fde, or pde
        2. entity with type relationship that connects entity1 to entity2
           -> Has its own meta data, so no value needed
        3. entity with type term, cde, fde, or pde
    """
    entity1_data = self.get_entity(entity1_ilx)
    if not entity1_data['id']:
        # BUG FIX: the original concatenated the dict entity1_data into the
        # message, raising TypeError instead of reporting the missing id.
        exit(
            'entity1_ilx: ' + entity1_ilx + ' does not exist'
        )
    relationship_data = self.get_entity(relationship_ilx)
    if not relationship_data['id']:
        exit(
            'relationship_ilx: ' + relationship_ilx + ' does not exist'
        )
    entity2_data = self.get_entity(entity2_ilx)
    if not entity2_data['id']:
        # BUG FIX: same dict-concatenation TypeError as above.
        exit(
            'entity2_ilx: ' + entity2_ilx + ' does not exist'
        )
    # Blanked ids signal deletion to the edit-relationship endpoint.
    data = {
        'term1_id': ' ', #entity1_data['id'],
        'relationship_tid': ' ', #relationship_data['id'],
        'term2_id': ' ',#entity2_data['id'],
        'term1_version': entity1_data['version'],
        'term2_version': entity2_data['version'],
        'relationship_term_version': relationship_data['version'],
        'orig_uid': self.user_id, # BUG: php lacks orig_uid update
    }
    entity_relationships = self.get_relationship_via_tid(entity1_data['id'])
    # Locate the exact (term1, term2, relationship-type) triple to delete.
    relationship_id = None
    for relationship in entity_relationships:
        if str(relationship['term1_id']) == str(entity1_data['id']):
            if str(relationship['term2_id']) == str(entity2_data['id']):
                if str(relationship['relationship_tid']) == str(relationship_data['id']):
                    relationship_id = relationship['id']
                    break
    if not relationship_id:
        # BUG FIX: warning previously said "Annotation".
        print('WARNING: Relationship you wanted to delete does not exist')
        return None
    url = self.base_url + 'term/edit-relationship/{id}'.format(id=relationship_id)
    output = self.post(
        url = url,
        data = data,
    )
    return output
|
tgbugs/ontquery | ontquery/plugins/interlex_client.py | InterLexClient.check_scicrunch_for_label | python | def check_scicrunch_for_label(self, label: str) -> dict:
list_of_crude_matches = self.crude_search_scicrunch_via_label(label)
for crude_match in list_of_crude_matches:
# If labels match
if crude_match['label'].lower().strip() == label.lower().strip():
complete_data_of_crude_match = self.get_entity(crude_match['ilx'])
crude_match_label = crude_match['label']
crude_match_user_id = complete_data_of_crude_match['uid']
# If label was created by you
if str(self.user_id) == str(crude_match_user_id):
return complete_data_of_crude_match # You created the entity already
# No label AND user id match
return {} | Sees if label with your user ID already exists
There can be multiples of the same label in InterLex, but there should only be one
label with your user id. Therefore you can create a label even if it already technically
exists, but not if you were the one to create it. | train
"def crude_search_scicrunch_via_label(self, label:str) -> dict:\n \"\"\" Server returns anything that is simlar in any catagory \"\"\"\n url = self.base_url + 'term/search/{term}?key={api_key}'.format(\n term = label,\n api_key = self.api_key,\n )\n return self.get(url)\n",
"def get_entity(self, ilx_id: str) -> dict:\n \"\"\" Gets full meta data (expect their annotations and relationships) from is ILX ID \"\"\"\n ilx_id = self.fix_ilx(ilx_id)\n url = self.base_url + \"ilx/search/identifier/{identifier}?key={api_key}\".format(\n identifier = ilx_id,\n api_key = self.api_key,\n )\n return self.get(url)\n"
] | class InterLexClient:
""" Connects to SciCrunch via its' api endpoints
Purpose is to allow external curators to add entities and annotations to those entities.
Functions To Use:
add_entity
add_annotation
Notes On Time Complexity:
Function add_entity, if added an entity successfully, will hit a least 5 endpoints. This
may cause it to take a few seconds to load each entity into SciCrunch. Function
add_annotation is a little more forgiving with it only hitting 3 minimum.
"""
# Root of the client's exception hierarchy; raised directly for
# otherwise-unclassified server errors.
class Error(Exception): pass
class SuperClassDoesNotExistError(Error):
    """ The superclass listed does not exist! """
class EntityDoesNotExistError(Error):
    """ The entity listed does not exist! """
class BadResponseError(Error): pass  # non-JSON body or unexpected status code
class NoLabelError(Error): pass  # add_entity called without a label
class NoTypeError(Error): pass  # add_entity called without a type
class MissingKeyError(Error): pass  # raw entity payload missing label/type
class IncorrectKeyError(Error): pass  # raw entity payload has unknown keys
class IncorrectAPIKeyError(Error): pass  # api key rejected by the server
# Production API root, and the public base used to build returned IRIs.
default_base_url = 'https://scicrunch.org/api/1/'
ilx_base_url = 'http://uri.interlex.org/base/'
def __init__(self, api_key: str, base_url: str = default_base_url):
    """Validate the api key and cache the caller's user id.

    :param api_key: SciCrunch API key; checked immediately, an invalid
        key raises IncorrectAPIKeyError before any other request.
    :param base_url: API root used for all subsequent endpoint calls.
    """
    self.api_key = api_key
    self.base_url = base_url
    # NOTE(review): user info is always fetched from default_base_url,
    # not base_url — presumably deliberate; confirm for non-default hosts.
    user_info_url = self.default_base_url + 'user/info?key=' + self.api_key
    self.check_api_key(user_info_url)
    self.user_id = str(self.get(user_info_url)['id'])
def check_api_key(self, url):
    """Raise IncorrectAPIKeyError unless *url* answers with 200/201."""
    response = requests.get(
        url,
        headers = {'Content-type': 'application/json'},
        auth = ('scicrunch', 'perl22(query)') # for test2.scicrunch.org
    )
    if response.status_code not in [200, 201]: # Safety catch.
        raise self.IncorrectAPIKeyError('api_key given is incorrect.')
def process_response(self, response: requests.models.Response) -> dict:
    """ Checks for correct data response and status codes

    Returns the raw JSON body on a 400 (so callers can inspect
    'errormsg'), output['data'] on 200/201, and raises BadResponseError
    for any other status or a non-JSON body.
    """
    try:
        output = response.json()
    except json.JSONDecodeError: # Server is having a bad day and crashed.
        raise self.BadResponseError(
            'Json not returned with status code [' + str(response.status_code) + ']')
    if response.status_code == 400:
        # 400 carries an error payload callers check via output.get('errormsg').
        return output
    if response.status_code not in [200, 201]: # Safety catch.
        raise self.BadResponseError(
            str(output) + ': with status code [' + str(response.status_code) +
            '] and params:' + str(output))
    return output['data']
def get(self, url: str) -> List[dict]:
    """GET *url* and return the server's validated 'data' payload."""
    response = requests.get(
        url,
        headers={'Content-type': 'application/json'},
        auth=('scicrunch', 'perl22(query)'),  # for test2.scicrunch.org
    )
    return self.process_response(response)
def post(self, url: str, data: List[dict]) -> List[dict]:
    """POST *data* (plus the api key) to *url* and return the validated payload.

    BUG FIX: the original mutated the caller's dict in place via
    data.update({'key': ...}), leaking the api key into dicts that were
    passed without a copy (e.g. update_entity's existing_entity).  The
    key is now added to a shallow copy instead.
    """
    payload = dict(data)
    payload['key'] = self.api_key
    response = requests.post(
        url,
        data = json.dumps(payload),
        headers = {'Content-type': 'application/json'},
        auth = ('scicrunch', 'perl22(query)') # for test2.scicrunch.org
    )
    output = self.process_response(response)
    return output
def fix_ilx(self, ilx_id: str) -> str:
    """Normalize an ILX identifier to its lowercase/underscore form.

    Strips the InterLex base-URL prefix and converts the curie prefixes
    ILX:/TMP: to ilx_/tmp_, the only form the database accepts.
    """
    # FIXME probably .rsplit('/', 1) is the more correct version of this
    # and because this is nominally a 'private' interface these should be
    bare = ilx_id.replace('http://uri.interlex.org/base/', '')
    # All accepted prefixes are exactly four characters long.
    if not bare.startswith(('TMP:', 'tmp_', 'ILX:', 'ilx_')):
        raise ValueError(
            'Need to provide ilx ID with format ilx_# or ILX:# for given ID ' + bare)
    return bare.replace('ILX:', 'ilx_').replace('TMP:', 'tmp_')
def process_superclass(self, entity: List[dict]) -> List[dict]:
    """ Replaces ILX ID with superclass ID

    Pops 'superclass' ({'ilx_id': ...}) off the entity, resolves it on
    the server, and stores [{'superclass_tid': <server id>}] under
    'superclasses' — the only shape the endpoint accepts.

    :raises SuperClassDoesNotExistError: if no ilx_id was given or the
        id is unknown to SciCrunch.
    """
    superclass = entity.pop('superclass')
    label = entity['label']
    if not superclass.get('ilx_id'):
        raise self.SuperClassDoesNotExistError(
            f'Superclass not given an interlex ID for label: {label}')
    superclass_data = self.get_entity(superclass['ilx_id'])
    if not superclass_data['id']:
        raise self.SuperClassDoesNotExistError(
            'Superclass ILX ID: ' + superclass['ilx_id'] + ' does not exist in SciCrunch')
    # BUG: server only accepts superclass_tid
    entity['superclasses'] = [{'superclass_tid': superclass_data['id']}]
    return entity
def process_synonyms(self, entity: List[dict]) -> List[dict]:
    """Validate that every synonym is a single-key {'literal': ...} dict.

    Returns the entity unchanged on success.
    """
    label = entity['label']
    for syn in entity['synonyms']:
        # these are internal errors and users should never see them
        if 'literal' not in syn:
            raise ValueError(f'Synonym not given a literal for label: {label}')
        if len(syn) > 1:
            raise ValueError(f'Too many keys in synonym for label: {label}')
    return entity
def process_existing_ids(self, entity: List[dict]) -> List[dict]:
    """Validate that each existing_id has exactly the keys 'iri' and 'curie'.

    Returns the entity unchanged on success.
    """
    label = entity['label']
    for eid in entity['existing_ids']:
        if 'curie' not in eid or 'iri' not in eid:
            raise ValueError(
                f'Missing needing key(s) in existing_ids for label: {label}')
        if len(eid) > 2:
            raise ValueError(
                f'Extra keys not recognized in existing_ids for label: {label}')
    return entity
def crude_search_scicrunch_via_label(self, label:str) -> dict:
    """ Server returns anything that is simlar in any catagory

    BUG FIX: the label is now percent-encoded before being placed in the
    URL path; previously labels with spaces or reserved characters
    produced malformed requests.
    """
    from urllib.parse import quote  # local import: keeps module deps unchanged
    url = self.base_url + 'term/search/{term}?key={api_key}'.format(
        term = quote(label, safe=''),
        api_key = self.api_key,
    )
    return self.get(url)
def get_entity(self, ilx_id: str) -> dict:
    """Fetch an entity's full metadata (excluding its annotations and
    relationships) by its ILX id."""
    identifier = self.fix_ilx(ilx_id)
    url = f'{self.base_url}ilx/search/identifier/{identifier}?key={self.api_key}'
    return self.get(url)
def add_entity(
        self,
        label: str,
        type: str,
        definition: str = None,
        comment: str = None,
        superclass: str = None,
        synonyms: list = None) -> dict:
    """Add a new entity to InterLex and return a flattened summary dict.

    Convenience wrapper around add_raw_entity: builds the raw payload,
    submits it, then reshapes the server response into a flat dict with
    full iri/curie/ilx URLs.

    :param label: name of the entity (required).
    :param type: term, cde, fde, pde, annotation, or relationship.
    :param definition: entity definition.
    :param comment: footnote about the data or its interpretation.
    :param superclass: ILX id of the parent entity.
    :param synonyms: list of synonym strings.
    :raises NoLabelError: if label is falsy.
    :raises NoTypeError: if type is falsy.
    """
    # Snapshot of the non-empty inputs; mirrored back out of the server
    # response below so callers get back the fields they sent in.
    template_entity_input = {k:v for k, v in locals().items() if k != 'self' and v}
    if template_entity_input.get('superclass'):
        template_entity_input['superclass'] = self.fix_ilx(template_entity_input['superclass'])
    if not label:
        raise self.NoLabelError('Entity needs a label')
    if not type:
        raise self.NoTypeError('Entity needs a type')
    # Raw payload in the nested shape add_raw_entity expects.
    entity_input = {
        'label': label,
        'type': type,
    }
    if definition:
        entity_input['definition'] = definition
    if comment:
        entity_input['comment'] = comment
    if superclass:
        entity_input['superclass'] = {'ilx_id':self.fix_ilx(superclass)}
    if synonyms:
        entity_input['synonyms'] = [{'literal': syn} for syn in synonyms]
    raw_entity_outout = self.add_raw_entity(entity_input)
    # Sanity check -> output same as input, but filled with response data
    entity_output = {}
    # Pick the interlex ('ilx_') pair among the existing ids as the
    # canonical iri/curie for the new entity.
    ics = [(e['iri'], e['curie'])
           for e in raw_entity_outout['existing_ids']]
    entity_output['iri'], entity_output['curie'] = sorted((i, c)
                                                          for i, c in ics
                                                          if 'ilx_' in i)[0]
    ### FOR NEW BETA. Old can have 'ilx_' in the ids ###
    if 'tmp' in raw_entity_outout['ilx']:
        _id = raw_entity_outout['ilx'].split('_')[-1]
        entity_output['iri'] = 'http://uri.interlex.org/base/tmp_' + _id
        entity_output['curie'] = 'TMP:' + _id
    # Mirror each requested field back from the server's representation.
    for key, value in template_entity_input.items():
        if key == 'superclass':
            entity_output[key] = raw_entity_outout['superclasses'][0]['ilx']
        elif key == 'synonyms':
            entity_output[key] = [syn['literal']
                                  for syn in raw_entity_outout['synonyms']]
        else:
            entity_output[key] = str(raw_entity_outout[key])
    # skip this for now, I check it downstream be cause I'm paranoid, but in this client it is
    # safe to assume that the value given will be the value returned if there is a return at all
    # it also isn't that they match exactly, because some new values (e.g. iri and curie) are expected
    #if entity_output != template_entity_input:
        # DEBUG: helps see what's wrong; might want to make a clean version of this
        # for key, value in template_entity_input.items():
        #     if template_entity_input[key] != entity_output[key]:
        #         print(template_entity_input[key], entity_output[key])
        #raise self.BadResponseError('The server did not return proper data!')
    if entity_output.get('superclass'):
        entity_output['superclass'] = self.ilx_base_url + entity_output['superclass']
    entity_output['ilx'] = self.ilx_base_url + raw_entity_outout['ilx']
    return entity_output
def add_raw_entity(self, entity: dict) -> dict:
    """ Adds entity if it does not already exist under your user ID.

    Need to provide a dictionary that has at least the key/values
    for label and type. If given a key, the values provided must be in the
    format shown in order for the server to accept them. You can input
    multiple synonyms, or existing_ids.

    Entity type can be any of the following: term, pde, fde, cde, annotation, or relationship

    Options Template:
        entity = {
            'label': '',
            'type': '',
            'definition': '',
            'comment': '',
            'superclass': {
                'ilx_id': ''
            },
            'synonyms': [
                {
                    'literal': ''
                },
            ],
            'existing_ids': [
                {
                    'iri': '',
                    'curie': '',
                },
            ],
        }

    Minimum Needed:
        entity = {
            'label': '',
            'type': '',
        }

    Example:
        entity = {
            'label': 'brain',
            'type': 'pde',
            'definition': 'Part of the central nervous system',
            'comment': 'Cannot live without it',
            'superclass': {
                'ilx_id': 'ilx_0108124', # ILX ID for Organ
            },
            'synonyms': [
                {
                    'literal': 'Encephalon'
                },
                {
                    'literal': 'Cerebro'
                },
            ],
            'existing_ids': [
                {
                    'iri': 'http://uri.neuinfo.org/nif/nifstd/birnlex_796',
                    'curie': 'BIRNLEX:796',
                },
            ],
        }
    """
    needed_in_entity = set([
        'label',
        'type',
    ])
    options_in_entity = set([
        'label',
        'type',
        'definition',
        'comment',
        'superclass',
        'synonyms',
        'existing_ids'
    ])
    prime_entity_url = self.base_url + 'ilx/add'
    add_entity_url = self.base_url + 'term/add'
    ### Checking if key/value format is correct ###
    # Seeing if you are missing a needed key
    if (set(entity) & needed_in_entity) != needed_in_entity:
        raise self.MissingKeyError(
            'You need key(s): '+ str(needed_in_entity - set(entity)))
    # Seeing if you have other options not included in the description
    elif (set(entity) | options_in_entity) != options_in_entity:
        raise self.IncorrectKeyError(
            'Unexpected key(s): ' + str(set(entity) - options_in_entity))
    entity['type'] = entity['type'].lower() # BUG: server only takes lowercase
    if entity['type'] not in ['term', 'relationship', 'annotation', 'cde', 'fde', 'pde']:
        raise TypeError(
            'Entity should be one of the following: ' +
            'term, relationship, annotation, cde, fde, pde')
    # Normalize nested structures into the exact shapes the server accepts.
    if entity.get('superclass'):
        entity = self.process_superclass(entity)
    if entity.get('synonyms'):
        entity = self.process_synonyms(entity)
    if entity.get('existing_ids'):
        entity = self.process_existing_ids(entity)
    entity['uid'] = self.user_id # BUG: php lacks uid update
    ### Adding entity to SciCrunch ###
    entity['term'] = entity.pop('label') # ilx/add nuance
    ilx_data = self.post(
        url = prime_entity_url,
        data = entity.copy(),
    ) # requesting spot in server for entity
    if ilx_data.get('ilx'):
        ilx_id = ilx_data['ilx']
    else:
        ilx_id = ilx_data['fragment'] # beta.scicrunch.org
    entity['label'] = entity.pop('term') # term/add nuance
    entity['ilx'] = ilx_id # need entity ilx_id to place entity in db
    output = self.post(
        url = add_entity_url,
        data = entity.copy(),
    ) # data represented in SciCrunch interface
    ### Checking if label already exists ###
    if output.get('errormsg'):
        if 'already exists' in output['errormsg'].lower():
            prexisting_data = self.check_scicrunch_for_label(entity['label'])
            if prexisting_data:
                print(
                    'You already added entity', entity['label'],
                    'with ILX ID:', prexisting_data['ilx'])
                return prexisting_data
        # BUG FIX: the original built `self.Error(output)` (twice) without
        # raising it, so server errors were silently ignored and the
        # get_entity call below failed with an unrelated KeyError.
        raise self.Error(output)  # FIXME what is the correct error here?
    # BUG: server output incomplete compared to search via ilx ids
    output = self.get_entity(output['ilx'])
    return output
def update_entity(
        self,
        ilx_id: str,
        label: str = None,
        type: str = None,
        definition: str = None,
        comment: str = None,
        superclass: str = None,
        synonyms: list = None) -> dict:
    """ Updates pre-existing entity as long as the api_key is from the account that created it

    Args:
        label: name of entity
        type: entities type
            Can be any of the following: term, cde, fde, pde, annotation, relationship
        definition: entities definition
        comment: a foot note regarding either the interpretation of the data or the data itself
        superclass: entity is a sub-part of this entity
            Example: Organ is a superclass to Brain
        synonyms: entity synonyms

    Returns:
        Server response that is a nested dictionary format
    """
    # Snapshot of the requested changes; mirrored back out of the server
    # response at the end.
    template_entity_input = {k:v for k, v in locals().copy().items() if k != 'self'}
    if template_entity_input.get('superclass'):
        template_entity_input['superclass'] = self.fix_ilx(template_entity_input['superclass'])
    existing_entity = self.get_entity(ilx_id=ilx_id)
    if not existing_entity['id']: # TODO: Need to make a proper ilx_id check error
        raise self.EntityDoesNotExistError(
            f'ilx_id provided {ilx_id} does not exist')
    update_url = self.base_url + 'term/edit/{id}'.format(id=existing_entity['id'])
    # Overlay only the fields the caller actually supplied.
    if label:
        existing_entity['label'] = label
    if type:
        existing_entity['type'] = type
    if definition:
        existing_entity['definition'] = definition
    if comment:
        existing_entity['comment'] = comment
    if superclass:
        existing_entity['superclass'] = {'ilx_id': superclass}
        existing_entity = self.process_superclass(existing_entity)
    if synonyms:
        if existing_entity['synonyms']:
            # If a synonym already exists (case-insensitive), reuse the
            # server's record; otherwise append it as a new literal.
            new_existing_synonyms = []
            existing_synonyms = {syn['literal'].lower():syn for syn in existing_entity['synonyms']}
            for synonym in synonyms:
                existing_synonym = existing_synonyms.get(synonym.lower())
                if not existing_synonym:
                    new_existing_synonyms.append({'literal': synonym})
                else:
                    new_existing_synonyms.append(existing_synonym)
            existing_entity['synonyms'] = new_existing_synonyms
        else:
            # BUG FIX: synonyms given for an entity that had none were
            # previously dropped silently.
            existing_entity['synonyms'] = [{'literal': syn} for syn in synonyms]
    response = self.post(
        url = update_url,
        data = existing_entity,
    )
    # BUG: server response is bad and needs to actually search again to get proper format
    raw_entity_outout = self.get_entity(response['ilx'])
    entity_output = {}
    ics = [(e['iri'], e['curie'])
           for e in raw_entity_outout['existing_ids']]
    entity_output['iri'], entity_output['curie'] = sorted((i, c)
                                                          for i, c in ics
                                                          if 'ilx_' in i)[0]
    ### FOR NEW BETA. Old can have 'ilx_' in the ids ###
    if 'tmp' in raw_entity_outout['ilx']:
        _id = raw_entity_outout['ilx'].split('_')[-1]
        entity_output['iri'] = 'http://uri.interlex.org/base/tmp_' + _id
        entity_output['curie'] = 'TMP:' + _id
    # NOTE: leftover debug `print(template_entity_input)` removed.
    for key, value in template_entity_input.items():
        if key == 'superclass':
            if raw_entity_outout.get('superclasses'):
                entity_output[key] = raw_entity_outout['superclasses'][0]['ilx']
        elif key == 'synonyms':
            entity_output[key] = [syn['literal']
                                  for syn in raw_entity_outout['synonyms']]
        elif key == 'ilx_id':
            pass
        else:
            entity_output[key] = str(raw_entity_outout[key])
    if entity_output.get('superclass'):
        entity_output['superclass'] = self.ilx_base_url + entity_output['superclass']
    entity_output['ilx'] = self.ilx_base_url + raw_entity_outout['ilx']
    return entity_output
def get_annotation_via_tid(self, tid: str) -> dict:
    """ Fetches every annotation anchored to the entity with the given term id. """
    endpoint = 'term/get-annotations/{tid}?key={api_key}'.format(
        tid=tid,
        api_key=self.api_key,
    )
    return self.get(self.base_url + endpoint)
def add_annotation(
        self,
        term_ilx_id: str,
        annotation_type_ilx_id: str,
        annotation_value: str) -> dict:
    """ Attaches an annotation value to a pre-existing entity.

    An annotation is composed of three parts:
        1. an anchor entity of type term, cde, fde, or pde
        2. an entity of type annotation (the annotation's meaning)
        3. the string value of the annotation

    Example:
        annotation = {
            'term_ilx_id': 'ilx_0101431', # brain ILX ID
            'annotation_type_ilx_id': 'ilx_0381360', # hasDbXref ILX ID
            'annotation_value': 'http://neurolex.org/wiki/birnlex_796',
        }
    """
    url = self.base_url + 'term/add-annotation'
    # Both the anchor entity and the annotation-type entity must already exist.
    term_data = self.get_entity(term_ilx_id)
    if not term_data['id']:
        exit('term_ilx_id: ' + term_ilx_id + ' does not exist')
    anno_data = self.get_entity(annotation_type_ilx_id)
    if not anno_data['id']:
        exit('annotation_type_ilx_id: ' + annotation_type_ilx_id + ' does not exist')
    data = {
        'tid': term_data['id'],
        'annotation_tid': anno_data['id'],
        'value': annotation_value,
        'term_version': term_data['version'],
        'annotation_term_version': anno_data['version'],
        'orig_uid': self.user_id,  # BUG: php lacks orig_uid update
    }
    output = self.post(url=url, data=data)
    error = output.get('errormsg') or ''
    if not error:
        return output
    # Duplicate submissions come back as an error; recover the existing row.
    if 'already exists' in error.lower():
        for existing in self.get_annotation_via_tid(term_data['id']):
            if str(existing['annotation_tid']) != str(anno_data['id']):
                continue
            if existing['value'] == data['value']:
                print(
                    'Annotation: [' + term_data['label'] + ' -> ' + anno_data['label'] +
                    ' -> ' + data['value'] + '], already exists.'
                )
                return existing
        exit(output)
    exit(output)
    return output
def delete_annotation(
        self,
        term_ilx_id: str,
        annotation_type_ilx_id: str,
        annotation_value: str) -> dict:
    """ Removes an existing annotation from an entity, if it can be found.

    Returns the server response of the edit request, or None when no
    matching annotation exists.
    """
    # Resolve both entities; bail out early if either is unknown.
    term_data = self.get_entity(term_ilx_id)
    if not term_data['id']:
        exit('term_ilx_id: ' + term_ilx_id + ' does not exist')
    anno_data = self.get_entity(annotation_type_ilx_id)
    if not anno_data['id']:
        exit('annotation_type_ilx_id: ' + annotation_type_ilx_id + ' does not exist')
    # Locate the exact annotation row (anchor, type, and value all match).
    annotation_id = ''
    for candidate in self.get_annotation_via_tid(term_data['id']):
        tid_match = str(candidate['tid']) == str(term_data['id'])
        type_match = str(candidate['annotation_tid']) == str(anno_data['id'])
        value_match = str(candidate['value']) == str(annotation_value)
        if tid_match and type_match and value_match:
            annotation_id = candidate['id']
            break
    if not annotation_id:
        print('''WARNING: Annotation you wanted to delete does not exist ''')
        return None
    url = self.base_url + 'term/edit-annotation/{annotation_id}'.format(
        annotation_id=annotation_id,
    )
    # Blank fields signal deletion to the edit-annotation endpoint.
    data = {
        'tid': ' ',  # for delete
        'annotation_tid': ' ',  # for delete
        'value': ' ',  # for delete
        'term_version': ' ',
        'annotation_term_version': ' ',
    }
    output = self.post(url=url, data=data)
    # check output
    return output
def get_relationship_via_tid(self, tid: str) -> dict:
    """ Fetches every relationship row anchored to the entity with this term id. """
    endpoint = 'term/get-relationships/{tid}?key={api_key}'.format(
        tid=tid,
        api_key=self.api_key,
    )
    return self.get(self.base_url + endpoint)
def add_relationship(
        self,
        entity1_ilx: str,
        relationship_ilx: str,
        entity2_ilx: str) -> dict:
    """ Links two existing entities through a relationship entity in InterLex.

    A relationship is made of three pre-existing entities:
        1. a term/cde/fde/pde entity (the subject)
        2. a relationship-type entity carrying the semantics (no value needed)
        3. a term/cde/fde/pde entity (the object)
    """
    url = self.base_url + 'term/add-relationship'
    # All three entities must already exist in InterLex.
    entity1_data = self.get_entity(entity1_ilx)
    if not entity1_data['id']:
        exit('entity1_ilx: ' + entity1_ilx + ' does not exist')
    relationship_data = self.get_entity(relationship_ilx)
    if not relationship_data['id']:
        exit('relationship_ilx: ' + relationship_ilx + ' does not exist')
    entity2_data = self.get_entity(entity2_ilx)
    if not entity2_data['id']:
        exit('entity2_ilx: ' + entity2_ilx + ' does not exist')
    data = {
        'term1_id': entity1_data['id'],
        'relationship_tid': relationship_data['id'],
        'term2_id': entity2_data['id'],
        'term1_version': entity1_data['version'],
        'term2_version': entity2_data['version'],
        'relationship_term_version': relationship_data['version'],
        'orig_uid': self.user_id,  # BUG: php lacks orig_uid update
    }
    output = self.post(url=url, data=data)
    error = output.get('errormsg') or ''
    if not error:
        return output
    # Duplicate submissions come back as an error; recover the existing row.
    if 'already exists' in error.lower():
        for existing in self.get_relationship_via_tid(entity1_data['id']):
            if str(existing['term2_id']) != str(entity2_data['id']):
                continue
            if existing['relationship_tid'] == relationship_data['id']:
                print(
                    'relationship: [' + entity1_data['label'] + ' -> ' +
                    relationship_data['label'] + ' -> ' + entity2_data['label'] +
                    '], already exists.'
                )
                return existing
        exit(output)
    exit(output)
    return output
def delete_relationship(
        self,
        entity1_ilx: str,
        relationship_ilx: str,
        entity2_ilx: str) -> dict:
    """ Deletes a relationship connection between two entities in InterLex.

    A relationship exists as 3 different parts:
        1. entity with type term, cde, fde, or pde
        2. entity with type relationship that connects entity1 to entity2
            -> Has its' own meta data, so no value needed
        3. entity with type term, cde, fde, or pde

    Returns:
        Server response of the edit request, or None when no matching
        relationship exists.
    """
    entity1_data = self.get_entity(entity1_ilx)
    if not entity1_data['id']:
        # BUG FIX: original concatenated the dict entity1_data into the
        # message, raising TypeError instead of reporting the bad ID.
        exit(
            'entity1_ilx: ' + entity1_ilx + ' does not exist'
        )
    relationship_data = self.get_entity(relationship_ilx)
    if not relationship_data['id']:
        exit(
            'relationship_ilx: ' + relationship_ilx + ' does not exist'
        )
    entity2_data = self.get_entity(entity2_ilx)
    if not entity2_data['id']:
        # BUG FIX: same dict-concatenation TypeError as entity1 above.
        exit(
            'entity2_ilx: ' + entity2_ilx + ' does not exist'
        )
    # Blank fields signal deletion to the edit-relationship endpoint.
    data = {
        'term1_id': ' ',  # for delete
        'relationship_tid': ' ',  # for delete
        'term2_id': ' ',  # for delete
        'term1_version': entity1_data['version'],
        'term2_version': entity2_data['version'],
        'relationship_term_version': relationship_data['version'],
        'orig_uid': self.user_id,  # BUG: php lacks orig_uid update
    }
    # Find the exact relationship row so we know which record to blank out.
    entity_relationships = self.get_relationship_via_tid(entity1_data['id'])
    relationship_id = None
    for relationship in entity_relationships:
        if str(relationship['term1_id']) == str(entity1_data['id']):
            if str(relationship['term2_id']) == str(entity2_data['id']):
                if str(relationship['relationship_tid']) == str(relationship_data['id']):
                    relationship_id = relationship['id']
                    break
    if not relationship_id:
        # BUG FIX: message previously said "Annotation"; this deletes a relationship.
        print('''WARNING: Relationship you wanted to delete does not exist ''')
        return None
    url = self.base_url + 'term/edit-relationship/{id}'.format(id=relationship_id)
    output = self.post(
        url = url,
        data = data,
    )
    return output
|
tgbugs/ontquery | ontquery/plugins/interlex_client.py | InterLexClient.get_entity | python | def get_entity(self, ilx_id: str) -> dict:
ilx_id = self.fix_ilx(ilx_id)
url = self.base_url + "ilx/search/identifier/{identifier}?key={api_key}".format(
identifier = ilx_id,
api_key = self.api_key,
)
return self.get(url) | Gets full meta data (expect their annotations and relationships) from is ILX ID | train | https://github.com/tgbugs/ontquery/blob/bcf4863cb2bf221afe2b093c5dc7da1377300041/ontquery/plugins/interlex_client.py#L182-L189 | [
"def get(self, url: str) -> List[dict]:\n \"\"\" Requests data from database \"\"\"\n response = requests.get(\n url,\n headers = {'Content-type': 'application/json'},\n auth = ('scicrunch', 'perl22(query)') # for test2.scicrunch.org\n )\n output = self.process_response(response)\n return output\n",
"def fix_ilx(self, ilx_id: str) -> str:\n \"\"\" Database only excepts lower case and underscore version of ID \"\"\"\n # FIXME probably .rsplit('/', 1) is the more correct version of this\n # and because this is nominally a 'private' interface these should be\n ilx_id = ilx_id.replace('http://uri.interlex.org/base/', '')\n if ilx_id[:4] not in ['TMP:', 'tmp_', 'ILX:', 'ilx_']:\n raise ValueError(\n 'Need to provide ilx ID with format ilx_# or ILX:# for given ID ' + ilx_id)\n return ilx_id.replace('ILX:', 'ilx_').replace('TMP:', 'tmp_')\n"
] | class InterLexClient:
""" Connects to SciCrunch via its' api endpoints
Purpose is to allow external curators to add entities and annotations to those entities.
Functions To Use:
add_entity
add_annotation
Notes On Time Complexity:
Function add_entity, if added an entity successfully, will hit a least 5 endpoints. This
may cause it to take a few seconds to load each entity into SciCrunch. Function
add_annotation is a little more forgiving with it only hitting 3 minimum.
"""
class Error(Exception): pass
class SuperClassDoesNotExistError(Error):
""" The superclass listed does not exist! """
class EntityDoesNotExistError(Error):
""" The entity listed does not exist! """
class BadResponseError(Error): pass
class NoLabelError(Error): pass
class NoTypeError(Error): pass
class MissingKeyError(Error): pass
class IncorrectKeyError(Error): pass
class IncorrectAPIKeyError(Error): pass
default_base_url = 'https://scicrunch.org/api/1/'
ilx_base_url = 'http://uri.interlex.org/base/'
def __init__(self, api_key: str, base_url: str = default_base_url):
self.api_key = api_key
self.base_url = base_url
user_info_url = self.default_base_url + 'user/info?key=' + self.api_key
self.check_api_key(user_info_url)
self.user_id = str(self.get(user_info_url)['id'])
def check_api_key(self, url):
response = requests.get(
url,
headers = {'Content-type': 'application/json'},
auth = ('scicrunch', 'perl22(query)') # for test2.scicrunch.org
)
if response.status_code not in [200, 201]: # Safety catch.
raise self.IncorrectAPIKeyError('api_key given is incorrect.')
def process_response(self, response: requests.models.Response) -> dict:
    """ Checks for correct data response and status codes.

    Returns the 'data' payload of a successful (200/201) JSON response.
    A 400 response is returned raw so callers can inspect 'errormsg'.
    Raises BadResponseError on non-JSON bodies or other status codes.
    """
    try:
        output = response.json()
    except json.JSONDecodeError:  # Server is having a bad day and crashed.
        raise self.BadResponseError(
            'Json not returned with status code [' + str(response.status_code) + ']')
    if response.status_code == 400:
        # 400 payloads are passed through unwrapped; callers check 'errormsg'.
        return output
    if response.status_code not in [200, 201]:  # Safety catch.
        raise self.BadResponseError(
            str(output) + ': with status code [' + str(response.status_code) +
            '] and params:' + str(output))
    return output['data']
def get(self, url: str) -> List[dict]:
""" Requests data from database """
response = requests.get(
url,
headers = {'Content-type': 'application/json'},
auth = ('scicrunch', 'perl22(query)') # for test2.scicrunch.org
)
output = self.process_response(response)
return output
def post(self, url: str, data: List[dict]) -> List[dict]:
""" Gives data to database """
data.update({
'key': self.api_key,
})
response = requests.post(
url,
data = json.dumps(data),
headers = {'Content-type': 'application/json'},
auth = ('scicrunch', 'perl22(query)') # for test2.scicrunch.org
)
output = self.process_response(response)
return output
def fix_ilx(self, ilx_id: str) -> str:
    """ Normalizes an InterLex ID to the lowercase/underscore form the server accepts.

    Strips the base IRI prefix if present, validates the fragment prefix,
    and converts 'ILX:'/'TMP:' curie prefixes to 'ilx_'/'tmp_'.
    """
    # FIXME probably .rsplit('/', 1) is the more correct version of this
    # and because this is nominally a 'private' interface these should be
    fragment = ilx_id.replace('http://uri.interlex.org/base/', '')
    valid_prefixes = ('TMP:', 'tmp_', 'ILX:', 'ilx_')
    if fragment[:4] not in valid_prefixes:
        raise ValueError(
            'Need to provide ilx ID with format ilx_# or ILX:# for given ID ' + fragment)
    fragment = fragment.replace('ILX:', 'ilx_')
    return fragment.replace('TMP:', 'tmp_')
def process_superclass(self, entity: List[dict]) -> List[dict]:
""" Replaces ILX ID with superclass ID """
superclass = entity.pop('superclass')
label = entity['label']
if not superclass.get('ilx_id'):
raise self.SuperClassDoesNotExistError(
f'Superclass not given an interlex ID for label: {label}')
superclass_data = self.get_entity(superclass['ilx_id'])
if not superclass_data['id']:
raise self.SuperClassDoesNotExistError(
'Superclass ILX ID: ' + superclass['ilx_id'] + ' does not exist in SciCrunch')
# BUG: only excepts superclass_tid
entity['superclasses'] = [{'superclass_tid': superclass_data['id']}]
return entity
def process_synonyms(self, entity: List[dict]) -> List[dict]:
    """ Validates that each synonym dict holds exactly the key 'literal'. """
    label = entity['label']
    for syn in entity['synonyms']:
        # these are internal errors and users should never see them
        if 'literal' not in syn:
            raise ValueError(f'Synonym not given a literal for label: {label}')
        if len(syn) > 1:
            raise ValueError(f'Too many keys in synonym for label: {label}')
    return entity
def process_existing_ids(self, entity: List[dict]) -> List[dict]:
    """ Validates existing_ids entries: each must carry exactly 'iri' and 'curie'. """
    label = entity['label']
    for ex_id in entity['existing_ids']:
        has_required = 'curie' in ex_id and 'iri' in ex_id
        if not has_required:
            raise ValueError(
                f'Missing needing key(s) in existing_ids for label: {label}')
        if len(ex_id) > 2:
            raise ValueError(
                f'Extra keys not recognized in existing_ids for label: {label}')
    return entity
def crude_search_scicrunch_via_label(self, label:str) -> dict:
""" Server returns anything that is simlar in any catagory """
url = self.base_url + 'term/search/{term}?key={api_key}'.format(
term = label,
api_key = self.api_key,
)
return self.get(url)
def check_scicrunch_for_label(self, label: str) -> dict:
""" Sees if label with your user ID already exists
There are can be multiples of the same label in interlex, but there should only be one
label with your user id. Therefore you can create labels if there already techniqually
exist, but not if you are the one to create it.
"""
list_of_crude_matches = self.crude_search_scicrunch_via_label(label)
for crude_match in list_of_crude_matches:
# If labels match
if crude_match['label'].lower().strip() == label.lower().strip():
complete_data_of_crude_match = self.get_entity(crude_match['ilx'])
crude_match_label = crude_match['label']
crude_match_user_id = complete_data_of_crude_match['uid']
# If label was created by you
if str(self.user_id) == str(crude_match_user_id):
return complete_data_of_crude_match # You created the entity already
# No label AND user id match
return {}
def add_entity(
self,
label: str,
type: str,
definition: str = None,
comment: str = None,
superclass: str = None,
synonyms: list = None) -> dict:
template_entity_input = {k:v for k, v in locals().items() if k != 'self' and v}
if template_entity_input.get('superclass'):
template_entity_input['superclass'] = self.fix_ilx(template_entity_input['superclass'])
if not label:
raise self.NoLabelError('Entity needs a label')
if not type:
raise self.NoTypeError('Entity needs a type')
entity_input = {
'label': label,
'type': type,
}
if definition:
entity_input['definition'] = definition
if comment:
entity_input['comment'] = comment
if superclass:
entity_input['superclass'] = {'ilx_id':self.fix_ilx(superclass)}
if synonyms:
entity_input['synonyms'] = [{'literal': syn} for syn in synonyms]
raw_entity_outout = self.add_raw_entity(entity_input)
# Sanity check -> output same as input, but filled with response data
entity_output = {}
ics = [(e['iri'], e['curie'])
for e in raw_entity_outout['existing_ids']]
entity_output['iri'], entity_output['curie'] = sorted((i, c)
for i, c in ics
if 'ilx_' in i)[0]
### FOR NEW BETA. Old can have 'ilx_' in the ids ###
if 'tmp' in raw_entity_outout['ilx']:
_id = raw_entity_outout['ilx'].split('_')[-1]
entity_output['iri'] = 'http://uri.interlex.org/base/tmp_' + _id
entity_output['curie'] = 'TMP:' + _id
for key, value in template_entity_input.items():
if key == 'superclass':
entity_output[key] = raw_entity_outout['superclasses'][0]['ilx']
elif key == 'synonyms':
entity_output[key] = [syn['literal']
for syn in raw_entity_outout['synonyms']]
else:
entity_output[key] = str(raw_entity_outout[key])
# skip this for now, I check it downstream be cause I'm paranoid, but in this client it is
# safe to assume that the value given will be the value returned if there is a return at all
# it also isn't that they match exactly, because some new values (e.g. iri and curie) are expected
#if entity_output != template_entity_input:
# DEBUG: helps see what's wrong; might want to make a clean version of this
# for key, value in template_entity_input.items():
# if template_entity_input[key] != entity_output[key]:
# print(template_entity_input[key], entity_output[key])
#raise self.BadResponseError('The server did not return proper data!')
if entity_output.get('superclass'):
entity_output['superclass'] = self.ilx_base_url + entity_output['superclass']
entity_output['ilx'] = self.ilx_base_url + raw_entity_outout['ilx']
return entity_output
def add_raw_entity(self, entity: dict) -> dict:
""" Adds entity if it does not already exist under your user ID.
Need to provide a list of dictionaries that have at least the key/values
for label and type. If given a key, the values provided must be in the
format shown in order for the server to except them. You can input
multiple synonyms, or existing_ids.
Entity type can be any of the following: term, pde, fde, cde, annotation, or relationship
Options Template:
entity = {
'label': '',
'type': '',
'definition': '',
'comment': '',
'superclass': {
'ilx_id': ''
},
'synonyms': [
{
'literal': ''
},
],
'existing_ids': [
{
'iri': '',
'curie': '',
},
],
}
Minimum Needed:
entity = {
'label': '',
'type': '',
}
Example:
entity = {
'label': 'brain',
'type': 'pde',
'definition': 'Part of the central nervous system',
'comment': 'Cannot live without it',
'superclass': {
'ilx_id': 'ilx_0108124', # ILX ID for Organ
},
'synonyms': [
{
'literal': 'Encephalon'
},
{
'literal': 'Cerebro'
},
],
'existing_ids': [
{
'iri': 'http://uri.neuinfo.org/nif/nifstd/birnlex_796',
'curie': 'BIRNLEX:796',
},
],
}
"""
needed_in_entity = set([
'label',
'type',
])
options_in_entity = set([
'label',
'type',
'definition',
'comment',
'superclass',
'synonyms',
'existing_ids'
])
prime_entity_url = self.base_url + 'ilx/add'
add_entity_url = self.base_url + 'term/add'
### Checking if key/value format is correct ###
# Seeing if you are missing a needed key
if (set(entity) & needed_in_entity) != needed_in_entity:
raise self.MissingKeyError(
'You need key(s): '+ str(needed_in_entity - set(entity)))
# Seeing if you have other options not included in the description
elif (set(entity) | options_in_entity) != options_in_entity:
raise self.IncorrectKeyError(
'Unexpected key(s): ' + str(set(entity) - options_in_entity))
entity['type'] = entity['type'].lower() # BUG: server only takes lowercase
if entity['type'] not in ['term', 'relationship', 'annotation', 'cde', 'fde', 'pde']:
raise TypeError(
'Entity should be one of the following: ' +
'term, relationship, annotation, cde, fde, pde')
if entity.get('superclass'):
entity = self.process_superclass(entity)
if entity.get('synonyms'):
entity = self.process_synonyms(entity)
if entity.get('existing_ids'):
entity = self.process_existing_ids(entity)
entity['uid'] = self.user_id # BUG: php lacks uid update
### Adding entity to SciCrunch ###
entity['term'] = entity.pop('label') # ilx/add nuance
ilx_data = self.post(
url = prime_entity_url,
data = entity.copy(),
) # requesting spot in server for entity
if ilx_data.get('ilx'):
ilx_id = ilx_data['ilx']
else:
ilx_id = ilx_data['fragment'] # beta.scicrunch.org
entity['label'] = entity.pop('term') # term/add nuance
entity['ilx'] = ilx_id # need entity ilx_id to place entity in db
output = self.post(
url = add_entity_url,
data = entity.copy(),
) # data represented in SciCrunch interface
### Checking if label already exisits ###
if output.get('errormsg'):
if 'already exists' in output['errormsg'].lower():
prexisting_data = self.check_scicrunch_for_label(entity['label'])
if prexisting_data:
print(
'You already added entity', entity['label'],
'with ILX ID:', prexisting_data['ilx'])
return prexisting_data
self.Error(output) # FIXME what is the correct error here?
self.Error(output) # FIXME what is the correct error here?
# BUG: server output incomplete compared to search via ilx ids
output = self.get_entity(output['ilx'])
return output
def update_entity(
self,
ilx_id: str,
label: str = None,
type: str = None,
definition: str = None,
comment: str = None,
superclass: str = None,
synonyms: list = None) -> dict:
""" Updates pre-existing entity as long as the api_key is from the account that created it
Args:
label: name of entity
type: entities type
Can be any of the following: term, cde, fde, pde, annotation, relationship
definition: entities definition
comment: a foot note regarding either the interpretation of the data or the data itself
superclass: entity is a sub-part of this entity
Example: Organ is a superclass to Brain
synonyms: entity synonyms
Returns:
Server response that is a nested dictionary format
"""
template_entity_input = {k:v for k, v in locals().copy().items() if k != 'self'}
if template_entity_input.get('superclass'):
template_entity_input['superclass'] = self.fix_ilx(template_entity_input['superclass'])
existing_entity = self.get_entity(ilx_id=ilx_id)
if not existing_entity['id']: # TODO: Need to make a proper ilx_id check error
raise self.EntityDoesNotExistError(
f'ilx_id provided {ilx_id} does not exist')
update_url = self.base_url + 'term/edit/{id}'.format(id=existing_entity['id'])
if label:
existing_entity['label'] = label
if type:
existing_entity['type'] = type
if definition:
existing_entity['definition'] = definition
if comment:
existing_entity['comment'] = comment
if superclass:
existing_entity['superclass'] = {'ilx_id': superclass}
existing_entity = self.process_superclass(existing_entity)
# If a match use old data, else append new synonym
if synonyms:
if existing_entity['synonyms']:
new_existing_synonyms = []
existing_synonyms = {syn['literal'].lower():syn for syn in existing_entity['synonyms']}
for synonym in synonyms:
existing_synonym = existing_synonyms.get(synonym.lower())
if not existing_synonym:
new_existing_synonyms.append({'literal': synonym})
else:
new_existing_synonyms.append(existing_synonym)
existing_entity['synonyms'] = new_existing_synonyms
# Just in case I need this...
# if synonyms_to_delete:
# if existing_entity['synonyms']:
# remaining_existing_synonyms = []
# existing_synonyms = {syn['literal'].lower():syn for syn in existing_entity['synonyms']}
# for synonym in synonyms:
# if existing_synonyms.get(synonym.lower()):
# existing_synonyms.pop(synonym.lower())
# else:
# print('WARNING: synonym you wanted to delete', synonym, 'does not exist')
# existing_entity['synonyms'] = list(existing_synonyms.values())
response = self.post(
url = update_url,
data = existing_entity,
)
# BUG: server response is bad and needs to actually search again to get proper format
raw_entity_outout = self.get_entity(response['ilx'])
entity_output = {}
ics = [(e['iri'], e['curie'])
for e in raw_entity_outout['existing_ids']]
entity_output['iri'], entity_output['curie'] = sorted((i, c)
for i, c in ics
if 'ilx_' in i)[0]
### FOR NEW BETA. Old can have 'ilx_' in the ids ###
if 'tmp' in raw_entity_outout['ilx']:
_id = raw_entity_outout['ilx'].split('_')[-1]
entity_output['iri'] = 'http://uri.interlex.org/base/tmp_' + _id
entity_output['curie'] = 'TMP:' + _id
print(template_entity_input)
for key, value in template_entity_input.items():
if key == 'superclass':
if raw_entity_outout.get('superclasses'):
entity_output[key] = raw_entity_outout['superclasses'][0]['ilx']
elif key == 'synonyms':
entity_output[key] = [syn['literal']
for syn in raw_entity_outout['synonyms']]
elif key == 'ilx_id':
pass
else:
entity_output[key] = str(raw_entity_outout[key])
if entity_output.get('superclass'):
entity_output['superclass'] = self.ilx_base_url + entity_output['superclass']
entity_output['ilx'] = self.ilx_base_url + raw_entity_outout['ilx']
return entity_output
def get_annotation_via_tid(self, tid: str) -> dict:
""" Gets annotation via anchored entity id """
url = self.base_url + 'term/get-annotations/{tid}?key={api_key}'.format(
tid = tid,
api_key = self.api_key,
)
return self.get(url)
def add_annotation(
self,
term_ilx_id: str,
annotation_type_ilx_id: str,
annotation_value: str) -> dict:
""" Adding an annotation value to a prexisting entity
An annotation exists as 3 different parts:
1. entity with type term, cde, fde, or pde
2. entity with type annotation
3. string value of the annotation
Example:
annotation = {
'term_ilx_id': 'ilx_0101431', # brain ILX ID
'annotation_type_ilx_id': 'ilx_0381360', # hasDbXref ILX ID
'annotation_value': 'http://neurolex.org/wiki/birnlex_796',
}
"""
url = self.base_url + 'term/add-annotation'
term_data = self.get_entity(term_ilx_id)
if not term_data['id']:
exit(
'term_ilx_id: ' + term_ilx_id + ' does not exist'
)
anno_data = self.get_entity(annotation_type_ilx_id)
if not anno_data['id']:
exit(
'annotation_type_ilx_id: ' + annotation_type_ilx_id +
' does not exist'
)
data = {
'tid': term_data['id'],
'annotation_tid': anno_data['id'],
'value': annotation_value,
'term_version': term_data['version'],
'annotation_term_version': anno_data['version'],
'orig_uid': self.user_id, # BUG: php lacks orig_uid update
}
output = self.post(
url = url,
data = data,
)
### If already exists, we return the actual annotation properly ###
if output.get('errormsg'):
if 'already exists' in output['errormsg'].lower():
term_annotations = self.get_annotation_via_tid(term_data['id'])
for term_annotation in term_annotations:
if str(term_annotation['annotation_tid']) == str(anno_data['id']):
if term_annotation['value'] == data['value']:
print(
'Annotation: [' + term_data['label'] + ' -> ' + anno_data['label'] +
' -> ' + data['value'] + '], already exists.'
)
return term_annotation
exit(output)
exit(output)
return output
def delete_annotation(
self,
term_ilx_id: str,
annotation_type_ilx_id: str,
annotation_value: str) -> dict:
""" If annotation doesnt exist, add it
"""
term_data = self.get_entity(term_ilx_id)
if not term_data['id']:
exit(
'term_ilx_id: ' + term_ilx_id + ' does not exist'
)
anno_data = self.get_entity(annotation_type_ilx_id)
if not anno_data['id']:
exit(
'annotation_type_ilx_id: ' + annotation_type_ilx_id +
' does not exist'
)
entity_annotations = self.get_annotation_via_tid(term_data['id'])
annotation_id = ''
for annotation in entity_annotations:
if str(annotation['tid']) == str(term_data['id']):
if str(annotation['annotation_tid']) == str(anno_data['id']):
if str(annotation['value']) == str(annotation_value):
annotation_id = annotation['id']
break
if not annotation_id:
print('''WARNING: Annotation you wanted to delete does not exist ''')
return None
url = self.base_url + 'term/edit-annotation/{annotation_id}'.format(
annotation_id = annotation_id
)
data = {
'tid': ' ', # for delete
'annotation_tid': ' ', # for delete
'value': ' ', # for delete
'term_version': ' ',
'annotation_term_version': ' ',
}
output = self.post(
url = url,
data = data,
)
# check output
return output
def get_relationship_via_tid(self, tid: str) -> dict:
url = self.base_url + 'term/get-relationships/{tid}?key={api_key}'.format(
tid = tid,
api_key = self.api_key,
)
return self.get(url)
def add_relationship(
self,
entity1_ilx: str,
relationship_ilx: str,
entity2_ilx: str) -> dict:
""" Adds relationship connection in Interlex
A relationship exists as 3 different parts:
1. entity with type term, cde, fde, or pde
2. entity with type relationship that connects entity1 to entity2
-> Has its' own meta data, so no value needed
3. entity with type term, cde, fde, or pde
"""
url = self.base_url + 'term/add-relationship'
entity1_data = self.get_entity(entity1_ilx)
if not entity1_data['id']:
exit(
'entity1_ilx: ' + entity1_ilx + ' does not exist'
)
relationship_data = self.get_entity(relationship_ilx)
if not relationship_data['id']:
exit(
'relationship_ilx: ' + relationship_ilx + ' does not exist'
)
entity2_data = self.get_entity(entity2_ilx)
if not entity2_data['id']:
exit(
'entity2_ilx: ' + entity2_ilx + ' does not exist'
)
data = {
'term1_id': entity1_data['id'],
'relationship_tid': relationship_data['id'],
'term2_id': entity2_data['id'],
'term1_version': entity1_data['version'],
'term2_version': entity2_data['version'],
'relationship_term_version': relationship_data['version'],
'orig_uid': self.user_id, # BUG: php lacks orig_uid update
}
output = self.post(
url = url,
data = data,
)
### If already exists, we return the actual relationship properly ###
if output.get('errormsg'):
if 'already exists' in output['errormsg'].lower():
term_relationships = self.get_relationship_via_tid(entity1_data['id'])
for term_relationship in term_relationships:
if str(term_relationship['term2_id']) == str(entity2_data['id']):
if term_relationship['relationship_tid'] == relationship_data['id']:
print(
'relationship: [' + entity1_data['label'] + ' -> ' +
relationship_data['label'] + ' -> ' + entity2_data['label'] +
'], already exists.'
)
return term_relationship
exit(output)
exit(output)
return output
def delete_relationship(
        self,
        entity1_ilx: str,
        relationship_ilx: str,
        entity2_ilx: str) -> dict:
    """ Deletes a relationship connection in InterLex.

    Looks up the exact (entity1, relationship, entity2) triple among the
    relationships anchored on entity1 and asks the server to clear it.

    Args:
        entity1_ilx: ILX ID of the relationship subject.
        relationship_ilx: ILX ID of the relationship-type entity.
        entity2_ilx: ILX ID of the relationship object.

    Returns:
        Server response for the edit, or None when no matching
        relationship exists.
    """
    entity1_data = self.get_entity(entity1_ilx)
    if not entity1_data['id']:
        # FIX: previously concatenated the result dict into the message,
        # raising TypeError instead of reporting the bad input.
        exit('entity1_ilx: ' + entity1_ilx + ' does not exist')
    relationship_data = self.get_entity(relationship_ilx)
    if not relationship_data['id']:
        exit('relationship_ilx: ' + relationship_ilx + ' does not exist')
    entity2_data = self.get_entity(entity2_ilx)
    if not entity2_data['id']:
        # FIX: same dict-concatenation TypeError as above.
        exit('entity2_ilx: ' + entity2_ilx + ' does not exist')
    data = {
        # Single-space values signal the server to blank these fields.
        'term1_id': ' ',
        'relationship_tid': ' ',
        'term2_id': ' ',
        'term1_version': entity1_data['version'],
        'term2_version': entity2_data['version'],
        'relationship_term_version': relationship_data['version'],
        'orig_uid': self.user_id,  # BUG: php lacks orig_uid update
    }
    entity_relationships = self.get_relationship_via_tid(entity1_data['id'])
    # Find the server id of the exact (term1, relationship, term2) triple.
    relationship_id = None
    for relationship in entity_relationships:
        if str(relationship['term1_id']) == str(entity1_data['id']):
            if str(relationship['term2_id']) == str(entity2_data['id']):
                if str(relationship['relationship_tid']) == str(relationship_data['id']):
                    relationship_id = relationship['id']
                    break
    if not relationship_id:
        # FIX: message previously said "Annotation"; this deletes relationships.
        print('''WARNING: Relationship you wanted to delete does not exist ''')
        return None
    url = self.base_url + 'term/edit-relationship/{id}'.format(id=relationship_id)
    output = self.post(
        url=url,
        data=data,
    )
    return output
|
tgbugs/ontquery | ontquery/plugins/interlex_client.py | InterLexClient.add_raw_entity | python | def add_raw_entity(self, entity: dict) -> dict:
needed_in_entity = set([
'label',
'type',
])
options_in_entity = set([
'label',
'type',
'definition',
'comment',
'superclass',
'synonyms',
'existing_ids'
])
prime_entity_url = self.base_url + 'ilx/add'
add_entity_url = self.base_url + 'term/add'
### Checking if key/value format is correct ###
# Seeing if you are missing a needed key
if (set(entity) & needed_in_entity) != needed_in_entity:
raise self.MissingKeyError(
'You need key(s): '+ str(needed_in_entity - set(entity)))
# Seeing if you have other options not included in the description
elif (set(entity) | options_in_entity) != options_in_entity:
raise self.IncorrectKeyError(
'Unexpected key(s): ' + str(set(entity) - options_in_entity))
entity['type'] = entity['type'].lower() # BUG: server only takes lowercase
if entity['type'] not in ['term', 'relationship', 'annotation', 'cde', 'fde', 'pde']:
raise TypeError(
'Entity should be one of the following: ' +
'term, relationship, annotation, cde, fde, pde')
if entity.get('superclass'):
entity = self.process_superclass(entity)
if entity.get('synonyms'):
entity = self.process_synonyms(entity)
if entity.get('existing_ids'):
entity = self.process_existing_ids(entity)
entity['uid'] = self.user_id # BUG: php lacks uid update
### Adding entity to SciCrunch ###
entity['term'] = entity.pop('label') # ilx/add nuance
ilx_data = self.post(
url = prime_entity_url,
data = entity.copy(),
) # requesting spot in server for entity
if ilx_data.get('ilx'):
ilx_id = ilx_data['ilx']
else:
ilx_id = ilx_data['fragment'] # beta.scicrunch.org
entity['label'] = entity.pop('term') # term/add nuance
entity['ilx'] = ilx_id # need entity ilx_id to place entity in db
output = self.post(
url = add_entity_url,
data = entity.copy(),
) # data represented in SciCrunch interface
### Checking if label already exisits ###
if output.get('errormsg'):
if 'already exists' in output['errormsg'].lower():
prexisting_data = self.check_scicrunch_for_label(entity['label'])
if prexisting_data:
print(
'You already added entity', entity['label'],
'with ILX ID:', prexisting_data['ilx'])
return prexisting_data
self.Error(output) # FIXME what is the correct error here?
self.Error(output) # FIXME what is the correct error here?
# BUG: server output incomplete compared to search via ilx ids
output = self.get_entity(output['ilx'])
return output | Adds entity if it does not already exist under your user ID.
Need to provide a list of dictionaries that have at least the key/values
for label and type. If given a key, the values provided must be in the
format shown in order for the server to accept them. You can input
multiple synonyms, or existing_ids.
Entity type can be any of the following: term, pde, fde, cde, annotation, or relationship
Options Template:
entity = {
'label': '',
'type': '',
'definition': '',
'comment': '',
'superclass': {
'ilx_id': ''
},
'synonyms': [
{
'literal': ''
},
],
'existing_ids': [
{
'iri': '',
'curie': '',
},
],
}
Minimum Needed:
entity = {
'label': '',
'type': '',
}
Example:
entity = {
'label': 'brain',
'type': 'pde',
'definition': 'Part of the central nervous system',
'comment': 'Cannot live without it',
'superclass': {
'ilx_id': 'ilx_0108124', # ILX ID for Organ
},
'synonyms': [
{
'literal': 'Encephalon'
},
{
'literal': 'Cerebro'
},
],
'existing_ids': [
{
'iri': 'http://uri.neuinfo.org/nif/nifstd/birnlex_796',
'curie': 'BIRNLEX:796',
},
],
} | train | https://github.com/tgbugs/ontquery/blob/bcf4863cb2bf221afe2b093c5dc7da1377300041/ontquery/plugins/interlex_client.py#L266-L401 | null | class InterLexClient:
""" Connects to SciCrunch via its' api endpoints
Purpose is to allow external curators to add entities and annotations to those entities.
Functions To Use:
add_entity
add_annotation
Notes On Time Complexity:
Function add_entity, if added an entity successfully, will hit a least 5 endpoints. This
may cause it to take a few seconds to load each entity into SciCrunch. Function
add_annotation is a little more forgiving with it only hitting 3 minimum.
"""
# --- Exception hierarchy: every client failure derives from the Error base ---
class Error(Exception): pass
class SuperClassDoesNotExistError(Error):
    """ The superclass listed does not exist! """
class EntityDoesNotExistError(Error):
    """ The entity listed does not exist! """
# Server answered, but with a malformed payload or unexpected status.
class BadResponseError(Error): pass
# Entity payload validation failures.
class NoLabelError(Error): pass
class NoTypeError(Error): pass
class MissingKeyError(Error): pass
class IncorrectKeyError(Error): pass
# The API key was rejected by the server.
class IncorrectAPIKeyError(Error): pass
# Production API root and the public prefix used to build entity IRIs.
default_base_url = 'https://scicrunch.org/api/1/'
ilx_base_url = 'http://uri.interlex.org/base/'
def __init__(self, api_key: str, base_url: str = default_base_url):
    """ Stores connection settings, validates the key, and caches the user id.

    Note: the key check always goes through default_base_url, even when a
    custom base_url is supplied.
    """
    self.api_key = api_key
    self.base_url = base_url
    user_info_url = '{}user/info?key={}'.format(self.default_base_url, api_key)
    self.check_api_key(user_info_url)
    self.user_id = str(self.get(user_info_url)['id'])
def check_api_key(self, url):
    """ Raises IncorrectAPIKeyError unless *url* answers with a 2xx status. """
    response = requests.get(
        url,
        headers={'Content-type': 'application/json'},
        auth=('scicrunch', 'perl22(query)'),  # for test2.scicrunch.org
    )
    if response.status_code in (200, 201):
        return
    raise self.IncorrectAPIKeyError('api_key given is incorrect.')
def process_response(self, response: 'requests.models.Response') -> dict:
    """ Validates a server reply and unwraps its 'data' payload.

    A 400 reply is handed back whole so callers can inspect 'errormsg';
    any other non-2xx status raises BadResponseError.
    """
    try:
        body = response.json()
    except json.JSONDecodeError:
        # Server is having a bad day and crashed.
        raise self.BadResponseError(
            'Json not returned with status code [' + str(response.status_code) + ']')
    status = response.status_code
    if status == 400:
        return body
    if status not in (200, 201):  # Safety catch.
        raise self.BadResponseError(
            str(body) + ': with status code [' + str(status) +
            '] and params:' + str(body))
    return body['data']
def get(self, url: str) -> List[dict]:
    """ GETs *url* and returns the validated 'data' payload. """
    response = requests.get(
        url,
        headers={'Content-type': 'application/json'},
        auth=('scicrunch', 'perl22(query)'),  # for test2.scicrunch.org
    )
    return self.process_response(response)
def post(self, url: str, data: List[dict]) -> List[dict]:
    """ POSTs *data* (as JSON, with the api key merged in) to *url*.

    Note: mutates the caller's *data* dict by inserting the 'key' entry,
    matching the original behavior.
    """
    data.update({'key': self.api_key})
    response = requests.post(
        url,
        data=json.dumps(data),
        headers={'Content-type': 'application/json'},
        auth=('scicrunch', 'perl22(query)'),  # for test2.scicrunch.org
    )
    return self.process_response(response)
def fix_ilx(self, ilx_id: str) -> str:
    """ Normalizes an ILX identifier to the lowercase/underscore form the database expects.

    Raises ValueError for identifiers that do not start with ILX:/ilx_/TMP:/tmp_.
    """
    # FIXME probably .rsplit('/', 1) is the more correct version of this
    # and because this is nominally a 'private' interface these should be
    fragment = ilx_id.replace('http://uri.interlex.org/base/', '')
    if fragment[:4] in ('TMP:', 'tmp_', 'ILX:', 'ilx_'):
        return fragment.replace('ILX:', 'ilx_').replace('TMP:', 'tmp_')
    raise ValueError(
        'Need to provide ilx ID with format ilx_# or ILX:# for given ID ' + fragment)
def process_superclass(self, entity: List[dict]) -> List[dict]:
    """ Swaps the caller-facing 'superclass' entry for the server's 'superclasses' form. """
    wanted = entity.pop('superclass')
    label = entity['label']
    superclass_ilx = wanted.get('ilx_id')
    if not superclass_ilx:
        raise self.SuperClassDoesNotExistError(
            f'Superclass not given an interlex ID for label: {label}')
    record = self.get_entity(superclass_ilx)
    if not record['id']:
        raise self.SuperClassDoesNotExistError(
            'Superclass ILX ID: ' + superclass_ilx + ' does not exist in SciCrunch')
    # BUG: only excepts superclass_tid
    entity['superclasses'] = [{'superclass_tid': record['id']}]
    return entity
def process_synonyms(self, entity: List[dict]) -> List[dict]:
    """ Validates that every synonym entry is exactly {'literal': ...}. """
    label = entity['label']
    for synonym in entity['synonyms']:
        # these are internal errors and users should never see them
        if 'literal' not in synonym:
            raise ValueError(f'Synonym not given a literal for label: {label}')
        if len(synonym) > 1:
            raise ValueError(f'Too many keys in synonym for label: {label}')
    return entity
def process_existing_ids(self, entity: List[dict]) -> List[dict]:
    """ Validates that every existing_ids entry has exactly the keys 'iri' and 'curie'. """
    label = entity['label']
    for existing_id in entity['existing_ids']:
        if 'curie' not in existing_id or 'iri' not in existing_id:
            raise ValueError(
                f'Missing needing key(s) in existing_ids for label: {label}')
        if len(existing_id) > 2:
            raise ValueError(
                f'Extra keys not recognized in existing_ids for label: {label}')
    return entity
def crude_search_scicrunch_via_label(self, label: str) -> dict:
    """ Fuzzy server-side search; the server returns anything similar in any category. """
    endpoint = 'term/search/{term}?key={api_key}'.format(
        term=label,
        api_key=self.api_key,
    )
    return self.get(self.base_url + endpoint)
def check_scicrunch_for_label(self, label: str) -> dict:
    """ Returns the full record for *label* only if this account created it.

    Many users can own entities with the same label, but only one copy per
    user id should exist; a match therefore requires both the label and the
    owning user id to line up. Returns {} when no owned match exists.
    """
    target = label.lower().strip()
    for candidate in self.crude_search_scicrunch_via_label(label):
        if candidate['label'].lower().strip() != target:
            continue
        full_record = self.get_entity(candidate['ilx'])
        # Only a label created by this account counts as "already added".
        if str(self.user_id) == str(full_record['uid']):
            return full_record
    # No label AND user id match.
    return {}
def get_entity(self, ilx_id: str) -> dict:
    """ Fetches full metadata (except annotations and relationships) for an ILX ID. """
    fragment = self.fix_ilx(ilx_id)
    url = self.base_url + "ilx/search/identifier/{identifier}?key={api_key}".format(
        identifier=fragment,
        api_key=self.api_key,
    )
    return self.get(url)
def add_entity(
        self,
        label: str,
        type: str,
        definition: str = None,
        comment: str = None,
        superclass: str = None,
        synonyms: list = None) -> dict:
    """ Adds an entity to InterLex and returns a flattened summary of it.

    Args:
        label: name of entity
        type: term, cde, fde, pde, annotation, or relationship
        definition: entity definition
        comment: footnote about the data or its interpretation
        superclass: ILX ID of the parent entity (e.g. Organ for Brain)
        synonyms: list of synonym strings

    Returns:
        dict with 'iri', 'curie', 'ilx' and the echoed input fields.
    """
    # Snapshot of the non-empty inputs; later used to shape the output dict.
    template_entity_input = {k:v for k, v in locals().items() if k != 'self' and v}
    if template_entity_input.get('superclass'):
        template_entity_input['superclass'] = self.fix_ilx(template_entity_input['superclass'])
    if not label:
        raise self.NoLabelError('Entity needs a label')
    if not type:
        raise self.NoTypeError('Entity needs a type')
    # Build the payload shape add_raw_entity expects.
    entity_input = {
        'label': label,
        'type': type,
    }
    if definition:
        entity_input['definition'] = definition
    if comment:
        entity_input['comment'] = comment
    if superclass:
        entity_input['superclass'] = {'ilx_id':self.fix_ilx(superclass)}
    if synonyms:
        entity_input['synonyms'] = [{'literal': syn} for syn in synonyms]
    raw_entity_outout = self.add_raw_entity(entity_input)
    # Sanity check -> output same as input, but filled with response data
    entity_output = {}
    ics = [(e['iri'], e['curie'])
           for e in raw_entity_outout['existing_ids']]
    # Pick the interlex-native (iri, curie) pair; sorted() keeps the choice deterministic.
    entity_output['iri'], entity_output['curie'] = sorted((i, c)
                                                          for i, c in ics
                                                          if 'ilx_' in i)[0]
    ### FOR NEW BETA. Old can have 'ilx_' in the ids ###
    if 'tmp' in raw_entity_outout['ilx']:
        _id = raw_entity_outout['ilx'].split('_')[-1]
        entity_output['iri'] = 'http://uri.interlex.org/base/tmp_' + _id
        entity_output['curie'] = 'TMP:' + _id
    # Echo the caller's fields back, sourced from the server's record.
    for key, value in template_entity_input.items():
        if key == 'superclass':
            entity_output[key] = raw_entity_outout['superclasses'][0]['ilx']
        elif key == 'synonyms':
            entity_output[key] = [syn['literal']
                                  for syn in raw_entity_outout['synonyms']]
        else:
            entity_output[key] = str(raw_entity_outout[key])
    # skip this for now, I check it downstream be cause I'm paranoid, but in this client it is
    # safe to assume that the value given will be the value returned if there is a return at all
    # it also isn't that they match exactly, because some new values (e.g. iri and curie) are expected
    #if entity_output != template_entity_input:
        # DEBUG: helps see what's wrong; might want to make a clean version of this
        # for key, value in template_entity_input.items():
        #     if template_entity_input[key] != entity_output[key]:
        #         print(template_entity_input[key], entity_output[key])
        #raise self.BadResponseError('The server did not return proper data!')
    # Expand the bare fragment(s) into full public IRIs.
    if entity_output.get('superclass'):
        entity_output['superclass'] = self.ilx_base_url + entity_output['superclass']
    entity_output['ilx'] = self.ilx_base_url + raw_entity_outout['ilx']
    return entity_output
def update_entity(
        self,
        ilx_id: str,
        label: str = None,
        type: str = None,
        definition: str = None,
        comment: str = None,
        superclass: str = None,
        synonyms: list = None) -> dict:
    """ Updates pre-existing entity as long as the api_key is from the account that created it
    Args:
        ilx_id: ILX ID of the entity to update
        label: name of entity
        type: entities type
            Can be any of the following: term, cde, fde, pde, annotation, relationship
        definition: entities definition
        comment: a foot note regarding either the interpretation of the data or the data itself
        superclass: entity is a sub-part of this entity
            Example: Organ is a superclass to Brain
        synonyms: entity synonyms
    Returns:
        Server response that is a nested dictionary format
    Raises:
        EntityDoesNotExistError: if ilx_id resolves to no record
    """
    # Snapshot of the raw arguments; drives the shape of the returned summary.
    template_entity_input = {k:v for k, v in locals().copy().items() if k != 'self'}
    if template_entity_input.get('superclass'):
        template_entity_input['superclass'] = self.fix_ilx(template_entity_input['superclass'])
    existing_entity = self.get_entity(ilx_id=ilx_id)
    if not existing_entity['id']: # TODO: Need to make a proper ilx_id check error
        raise self.EntityDoesNotExistError(
            f'ilx_id provided {ilx_id} does not exist')
    update_url = self.base_url + 'term/edit/{id}'.format(id=existing_entity['id'])
    # Overlay only the fields the caller supplied onto the server record.
    if label:
        existing_entity['label'] = label
    if type:
        existing_entity['type'] = type
    if definition:
        existing_entity['definition'] = definition
    if comment:
        existing_entity['comment'] = comment
    if superclass:
        existing_entity['superclass'] = {'ilx_id': superclass}
        existing_entity = self.process_superclass(existing_entity)
    # If a match use old data, else append new synonym
    if synonyms:
        if existing_entity['synonyms']:
            new_existing_synonyms = []
            existing_synonyms = {syn['literal'].lower():syn for syn in existing_entity['synonyms']}
            for synonym in synonyms:
                existing_synonym = existing_synonyms.get(synonym.lower())
                if not existing_synonym:
                    new_existing_synonyms.append({'literal': synonym})
                else:
                    new_existing_synonyms.append(existing_synonym)
            existing_entity['synonyms'] = new_existing_synonyms
    # Just in case I need this...
    # if synonyms_to_delete:
    #     if existing_entity['synonyms']:
    #         remaining_existing_synonyms = []
    #         existing_synonyms = {syn['literal'].lower():syn for syn in existing_entity['synonyms']}
    #         for synonym in synonyms:
    #             if existing_synonyms.get(synonym.lower()):
    #                 existing_synonyms.pop(synonym.lower())
    #             else:
    #                 print('WARNING: synonym you wanted to delete', synonym, 'does not exist')
    #         existing_entity['synonyms'] = list(existing_synonyms.values())
    response = self.post(
        url = update_url,
        data = existing_entity,
    )
    # BUG: server response is bad and needs to actually search again to get proper format
    raw_entity_outout = self.get_entity(response['ilx'])
    entity_output = {}
    ics = [(e['iri'], e['curie'])
           for e in raw_entity_outout['existing_ids']]
    # Pick the interlex-native (iri, curie) pair; sorted() keeps the choice deterministic.
    entity_output['iri'], entity_output['curie'] = sorted((i, c)
                                                          for i, c in ics
                                                          if 'ilx_' in i)[0]
    ### FOR NEW BETA. Old can have 'ilx_' in the ids ###
    if 'tmp' in raw_entity_outout['ilx']:
        _id = raw_entity_outout['ilx'].split('_')[-1]
        entity_output['iri'] = 'http://uri.interlex.org/base/tmp_' + _id
        entity_output['curie'] = 'TMP:' + _id
    # NOTE(review): debug print left in place to preserve existing behavior.
    print(template_entity_input)
    for key, value in template_entity_input.items():
        if key == 'superclass':
            if raw_entity_outout.get('superclasses'):
                entity_output[key] = raw_entity_outout['superclasses'][0]['ilx']
        elif key == 'synonyms':
            entity_output[key] = [syn['literal']
                                  for syn in raw_entity_outout['synonyms']]
        elif key == 'ilx_id':
            pass
        else:
            entity_output[key] = str(raw_entity_outout[key])
    # Expand the bare fragment(s) into full public IRIs.
    if entity_output.get('superclass'):
        entity_output['superclass'] = self.ilx_base_url + entity_output['superclass']
    entity_output['ilx'] = self.ilx_base_url + raw_entity_outout['ilx']
    return entity_output
def get_annotation_via_tid(self, tid: str) -> dict:
    """ Gets every annotation anchored on the entity whose server id is *tid*. """
    endpoint = 'term/get-annotations/{tid}?key={api_key}'.format(
        tid=tid,
        api_key=self.api_key,
    )
    return self.get(self.base_url + endpoint)
def add_annotation(
        self,
        term_ilx_id: str,
        annotation_type_ilx_id: str,
        annotation_value: str) -> dict:
    """ Adding an annotation value to a prexisting entity
    An annotation exists as 3 different parts:
        1. entity with type term, cde, fde, or pde
        2. entity with type annotation
        3. string value of the annotation
    Example:
        annotation = {
            'term_ilx_id': 'ilx_0101431', # brain ILX ID
            'annotation_type_ilx_id': 'ilx_0381360', # hasDbXref ILX ID
            'annotation_value': 'http://neurolex.org/wiki/birnlex_796',
        }
    Returns:
        The server record of the created (or already existing) annotation.
    """
    url = self.base_url + 'term/add-annotation'
    term_data = self.get_entity(term_ilx_id)
    if not term_data['id']:
        # NOTE(review): exit() raises SystemExit, killing the process on bad input.
        exit(
            'term_ilx_id: ' + term_ilx_id + ' does not exist'
        )
    anno_data = self.get_entity(annotation_type_ilx_id)
    if not anno_data['id']:
        exit(
            'annotation_type_ilx_id: ' + annotation_type_ilx_id +
            ' does not exist'
        )
    data = {
        'tid': term_data['id'],
        'annotation_tid': anno_data['id'],
        'value': annotation_value,
        'term_version': term_data['version'],
        'annotation_term_version': anno_data['version'],
        'orig_uid': self.user_id, # BUG: php lacks orig_uid update
    }
    output = self.post(
        url = url,
        data = data,
    )
    ### If already exists, we return the actual annotation properly ###
    if output.get('errormsg'):
        if 'already exists' in output['errormsg'].lower():
            term_annotations = self.get_annotation_via_tid(term_data['id'])
            # Find the matching (type, value) pair among existing annotations.
            for term_annotation in term_annotations:
                if str(term_annotation['annotation_tid']) == str(anno_data['id']):
                    if term_annotation['value'] == data['value']:
                        print(
                            'Annotation: [' + term_data['label'] + ' -> ' + anno_data['label'] +
                            ' -> ' + data['value'] + '], already exists.'
                        )
                        return term_annotation
            exit(output)
        exit(output)
    return output
def delete_annotation(
        self,
        term_ilx_id: str,
        annotation_type_ilx_id: str,
        annotation_value: str) -> dict:
    """ Deletes the annotation matching (term, type, value), if it exists.

    Returns:
        Server response for the edit, or None when no matching annotation
        is found.
    """
    term_data = self.get_entity(term_ilx_id)
    if not term_data['id']:
        # NOTE(review): exit() raises SystemExit, killing the process on bad input.
        exit(
            'term_ilx_id: ' + term_ilx_id + ' does not exist'
        )
    anno_data = self.get_entity(annotation_type_ilx_id)
    if not anno_data['id']:
        exit(
            'annotation_type_ilx_id: ' + annotation_type_ilx_id +
            ' does not exist'
        )
    entity_annotations = self.get_annotation_via_tid(term_data['id'])
    # Find the server id of the exact (term, annotation type, value) triple.
    annotation_id = ''
    for annotation in entity_annotations:
        if str(annotation['tid']) == str(term_data['id']):
            if str(annotation['annotation_tid']) == str(anno_data['id']):
                if str(annotation['value']) == str(annotation_value):
                    annotation_id = annotation['id']
                    break
    if not annotation_id:
        print('''WARNING: Annotation you wanted to delete does not exist ''')
        return None
    url = self.base_url + 'term/edit-annotation/{annotation_id}'.format(
        annotation_id = annotation_id
    )
    # Single-space values signal the server to blank these fields.
    data = {
        'tid': ' ', # for delete
        'annotation_tid': ' ', # for delete
        'value': ' ', # for delete
        'term_version': ' ',
        'annotation_term_version': ' ',
    }
    output = self.post(
        url = url,
        data = data,
    )
    # check output
    return output
def get_relationship_via_tid(self, tid: str) -> dict:
    """ Fetches every relationship record anchored on the entity whose server id is *tid*. """
    endpoint = 'term/get-relationships/{tid}?key={api_key}'.format(
        tid=tid,
        api_key=self.api_key,
    )
    return self.get(self.base_url + endpoint)
def add_relationship(
        self,
        entity1_ilx: str,
        relationship_ilx: str,
        entity2_ilx: str) -> dict:
    """ Adds a relationship connection in InterLex.

    A relationship exists as 3 different parts:
        1. entity with type term, cde, fde, or pde
        2. entity with type relationship that connects entity1 to entity2
           -> has its own metadata, so no value is needed
        3. entity with type term, cde, fde, or pde

    Args:
        entity1_ilx: ILX ID of the relationship subject.
        relationship_ilx: ILX ID of the relationship-type entity.
        entity2_ilx: ILX ID of the relationship object.

    Returns:
        The server record of the created (or already existing) relationship.
    """
    url = self.base_url + 'term/add-relationship'
    entity1_data = self.get_entity(entity1_ilx)
    if not entity1_data['id']:
        exit('entity1_ilx: ' + entity1_ilx + ' does not exist')
    relationship_data = self.get_entity(relationship_ilx)
    if not relationship_data['id']:
        exit('relationship_ilx: ' + relationship_ilx + ' does not exist')
    entity2_data = self.get_entity(entity2_ilx)
    if not entity2_data['id']:
        exit('entity2_ilx: ' + entity2_ilx + ' does not exist')
    data = {
        'term1_id': entity1_data['id'],
        'relationship_tid': relationship_data['id'],
        'term2_id': entity2_data['id'],
        'term1_version': entity1_data['version'],
        'term2_version': entity2_data['version'],
        'relationship_term_version': relationship_data['version'],
        'orig_uid': self.user_id,  # BUG: php lacks orig_uid update
    }
    output = self.post(
        url=url,
        data=data,
    )
    ### If already exists, we return the actual relationship properly ###
    if output.get('errormsg'):
        if 'already exists' in output['errormsg'].lower():
            term_relationships = self.get_relationship_via_tid(entity1_data['id'])
            for term_relationship in term_relationships:
                # FIX: compare ids as strings on BOTH checks; the server may
                # return string ids while get_entity yields ints (or vice
                # versa), which previously made the relationship_tid match
                # fail and fall through to exit() instead of returning the
                # existing relationship. Mirrors delete_relationship.
                if str(term_relationship['term2_id']) == str(entity2_data['id']):
                    if str(term_relationship['relationship_tid']) == str(relationship_data['id']):
                        print(
                            'relationship: [' + entity1_data['label'] + ' -> ' +
                            relationship_data['label'] + ' -> ' + entity2_data['label'] +
                            '], already exists.'
                        )
                        return term_relationship
            exit(output)
        exit(output)
    return output
def delete_relationship(
        self,
        entity1_ilx: str,
        relationship_ilx: str,
        entity2_ilx: str) -> dict:
    """ Deletes a relationship connection in InterLex.

    Looks up the exact (entity1, relationship, entity2) triple among the
    relationships anchored on entity1 and asks the server to clear it.

    Args:
        entity1_ilx: ILX ID of the relationship subject.
        relationship_ilx: ILX ID of the relationship-type entity.
        entity2_ilx: ILX ID of the relationship object.

    Returns:
        Server response for the edit, or None when no matching
        relationship exists.
    """
    entity1_data = self.get_entity(entity1_ilx)
    if not entity1_data['id']:
        # FIX: previously concatenated the result dict into the message,
        # raising TypeError instead of reporting the bad input.
        exit('entity1_ilx: ' + entity1_ilx + ' does not exist')
    relationship_data = self.get_entity(relationship_ilx)
    if not relationship_data['id']:
        exit('relationship_ilx: ' + relationship_ilx + ' does not exist')
    entity2_data = self.get_entity(entity2_ilx)
    if not entity2_data['id']:
        # FIX: same dict-concatenation TypeError as above.
        exit('entity2_ilx: ' + entity2_ilx + ' does not exist')
    data = {
        # Single-space values signal the server to blank these fields.
        'term1_id': ' ',
        'relationship_tid': ' ',
        'term2_id': ' ',
        'term1_version': entity1_data['version'],
        'term2_version': entity2_data['version'],
        'relationship_term_version': relationship_data['version'],
        'orig_uid': self.user_id,  # BUG: php lacks orig_uid update
    }
    entity_relationships = self.get_relationship_via_tid(entity1_data['id'])
    # Find the server id of the exact (term1, relationship, term2) triple.
    relationship_id = None
    for relationship in entity_relationships:
        if str(relationship['term1_id']) == str(entity1_data['id']):
            if str(relationship['term2_id']) == str(entity2_data['id']):
                if str(relationship['relationship_tid']) == str(relationship_data['id']):
                    relationship_id = relationship['id']
                    break
    if not relationship_id:
        # FIX: message previously said "Annotation"; this deletes relationships.
        print('''WARNING: Relationship you wanted to delete does not exist ''')
        return None
    url = self.base_url + 'term/edit-relationship/{id}'.format(id=relationship_id)
    output = self.post(
        url=url,
        data=data,
    )
    return output
|
tgbugs/ontquery | ontquery/plugins/interlex_client.py | InterLexClient.update_entity | python | def update_entity(
self,
ilx_id: str,
label: str = None,
type: str = None,
definition: str = None,
comment: str = None,
superclass: str = None,
synonyms: list = None) -> dict:
template_entity_input = {k:v for k, v in locals().copy().items() if k != 'self'}
if template_entity_input.get('superclass'):
template_entity_input['superclass'] = self.fix_ilx(template_entity_input['superclass'])
existing_entity = self.get_entity(ilx_id=ilx_id)
if not existing_entity['id']: # TODO: Need to make a proper ilx_id check error
raise self.EntityDoesNotExistError(
f'ilx_id provided {ilx_id} does not exist')
update_url = self.base_url + 'term/edit/{id}'.format(id=existing_entity['id'])
if label:
existing_entity['label'] = label
if type:
existing_entity['type'] = type
if definition:
existing_entity['definition'] = definition
if comment:
existing_entity['comment'] = comment
if superclass:
existing_entity['superclass'] = {'ilx_id': superclass}
existing_entity = self.process_superclass(existing_entity)
# If a match use old data, else append new synonym
if synonyms:
if existing_entity['synonyms']:
new_existing_synonyms = []
existing_synonyms = {syn['literal'].lower():syn for syn in existing_entity['synonyms']}
for synonym in synonyms:
existing_synonym = existing_synonyms.get(synonym.lower())
if not existing_synonym:
new_existing_synonyms.append({'literal': synonym})
else:
new_existing_synonyms.append(existing_synonym)
existing_entity['synonyms'] = new_existing_synonyms
# Just in case I need this...
# if synonyms_to_delete:
# if existing_entity['synonyms']:
# remaining_existing_synonyms = []
# existing_synonyms = {syn['literal'].lower():syn for syn in existing_entity['synonyms']}
# for synonym in synonyms:
# if existing_synonyms.get(synonym.lower()):
# existing_synonyms.pop(synonym.lower())
# else:
# print('WARNING: synonym you wanted to delete', synonym, 'does not exist')
# existing_entity['synonyms'] = list(existing_synonyms.values())
response = self.post(
url = update_url,
data = existing_entity,
)
# BUG: server response is bad and needs to actually search again to get proper format
raw_entity_outout = self.get_entity(response['ilx'])
entity_output = {}
ics = [(e['iri'], e['curie'])
for e in raw_entity_outout['existing_ids']]
entity_output['iri'], entity_output['curie'] = sorted((i, c)
for i, c in ics
if 'ilx_' in i)[0]
### FOR NEW BETA. Old can have 'ilx_' in the ids ###
if 'tmp' in raw_entity_outout['ilx']:
_id = raw_entity_outout['ilx'].split('_')[-1]
entity_output['iri'] = 'http://uri.interlex.org/base/tmp_' + _id
entity_output['curie'] = 'TMP:' + _id
print(template_entity_input)
for key, value in template_entity_input.items():
if key == 'superclass':
if raw_entity_outout.get('superclasses'):
entity_output[key] = raw_entity_outout['superclasses'][0]['ilx']
elif key == 'synonyms':
entity_output[key] = [syn['literal']
for syn in raw_entity_outout['synonyms']]
elif key == 'ilx_id':
pass
else:
entity_output[key] = str(raw_entity_outout[key])
if entity_output.get('superclass'):
entity_output['superclass'] = self.ilx_base_url + entity_output['superclass']
entity_output['ilx'] = self.ilx_base_url + raw_entity_outout['ilx']
return entity_output | Updates pre-existing entity as long as the api_key is from the account that created it
Args:
label: name of entity
type: entities type
Can be any of the following: term, cde, fde, pde, annotation, relationship
definition: entities definition
comment: a foot note regarding either the interpretation of the data or the data itself
superclass: entity is a sub-part of this entity
Example: Organ is a superclass to Brain
synonyms: entity synonyms
Returns:
Server response that is a nested dictionary format | train | https://github.com/tgbugs/ontquery/blob/bcf4863cb2bf221afe2b093c5dc7da1377300041/ontquery/plugins/interlex_client.py#L403-L517 | [
"def post(self, url: str, data: List[dict]) -> List[dict]:\n \"\"\" Gives data to database \"\"\"\n data.update({\n 'key': self.api_key,\n })\n response = requests.post(\n url,\n data = json.dumps(data),\n headers = {'Content-type': 'application/json'},\n auth = ('scicrunch', 'perl22(query)') # for test2.scicrunch.org\n )\n output = self.process_response(response)\n return output\n",
"def fix_ilx(self, ilx_id: str) -> str:\n \"\"\" Database only excepts lower case and underscore version of ID \"\"\"\n # FIXME probably .rsplit('/', 1) is the more correct version of this\n # and because this is nominally a 'private' interface these should be\n ilx_id = ilx_id.replace('http://uri.interlex.org/base/', '')\n if ilx_id[:4] not in ['TMP:', 'tmp_', 'ILX:', 'ilx_']:\n raise ValueError(\n 'Need to provide ilx ID with format ilx_# or ILX:# for given ID ' + ilx_id)\n return ilx_id.replace('ILX:', 'ilx_').replace('TMP:', 'tmp_')\n",
"def process_superclass(self, entity: List[dict]) -> List[dict]:\n \"\"\" Replaces ILX ID with superclass ID \"\"\"\n superclass = entity.pop('superclass')\n label = entity['label']\n if not superclass.get('ilx_id'):\n raise self.SuperClassDoesNotExistError(\n f'Superclass not given an interlex ID for label: {label}')\n superclass_data = self.get_entity(superclass['ilx_id'])\n if not superclass_data['id']:\n raise self.SuperClassDoesNotExistError(\n 'Superclass ILX ID: ' + superclass['ilx_id'] + ' does not exist in SciCrunch')\n # BUG: only excepts superclass_tid\n entity['superclasses'] = [{'superclass_tid': superclass_data['id']}]\n return entity\n",
"def get_entity(self, ilx_id: str) -> dict:\n \"\"\" Gets full meta data (expect their annotations and relationships) from is ILX ID \"\"\"\n ilx_id = self.fix_ilx(ilx_id)\n url = self.base_url + \"ilx/search/identifier/{identifier}?key={api_key}\".format(\n identifier = ilx_id,\n api_key = self.api_key,\n )\n return self.get(url)\n"
] | class InterLexClient:
""" Connects to SciCrunch via its' api endpoints
Purpose is to allow external curators to add entities and annotations to those entities.
Functions To Use:
add_entity
add_annotation
Notes On Time Complexity:
Function add_entity, if added an entity successfully, will hit a least 5 endpoints. This
may cause it to take a few seconds to load each entity into SciCrunch. Function
add_annotation is a little more forgiving with it only hitting 3 minimum.
"""
# --- Exception hierarchy: every client failure derives from the Error base ---
class Error(Exception): pass
class SuperClassDoesNotExistError(Error):
    """ The superclass listed does not exist! """
class EntityDoesNotExistError(Error):
    """ The entity listed does not exist! """
# Server answered, but with a malformed payload or unexpected status.
class BadResponseError(Error): pass
# Entity payload validation failures.
class NoLabelError(Error): pass
class NoTypeError(Error): pass
class MissingKeyError(Error): pass
class IncorrectKeyError(Error): pass
# The API key was rejected by the server.
class IncorrectAPIKeyError(Error): pass
# Production API root and the public prefix used to build entity IRIs.
default_base_url = 'https://scicrunch.org/api/1/'
ilx_base_url = 'http://uri.interlex.org/base/'
def __init__(self, api_key: str, base_url: str = default_base_url):
    """ Stores connection settings, validates the key, and caches the user id.

    Note: the key check always goes through default_base_url, even when a
    custom base_url is supplied.
    """
    self.api_key = api_key
    self.base_url = base_url
    user_info_url = '{}user/info?key={}'.format(self.default_base_url, api_key)
    self.check_api_key(user_info_url)
    self.user_id = str(self.get(user_info_url)['id'])
def check_api_key(self, url):
    """ Hits the user-info endpoint and fails fast on a rejected key. """
    resp = requests.get(
        url,
        headers={'Content-type': 'application/json'},
        auth=('scicrunch', 'perl22(query)'),  # for test2.scicrunch.org
    )
    key_accepted = resp.status_code in [200, 201]
    if not key_accepted:  # Safety catch.
        raise self.IncorrectAPIKeyError('api_key given is incorrect.')
def process_response(self, response: requests.models.Response) -> dict:
    """ Validates the status code and unwraps the server's json payload. """
    status = response.status_code
    try:
        body = response.json()
    except json.JSONDecodeError:  # Server is having a bad day and crashed.
        raise self.BadResponseError(
            'Json not returned with status code [' + str(status) + ']')
    # 400s carry a usable error payload; hand it back for inspection.
    if status == 400:
        return body
    if status not in [200, 201]:  # Safety catch.
        raise self.BadResponseError(
            str(body) + ': with status code [' + str(status) +
            '] and params:' + str(body))
    return body['data']
def get(self, url: str) -> List[dict]:
    """ Fetches and unwraps data from the given API url. """
    raw = requests.get(
        url,
        headers={'Content-type': 'application/json'},
        auth=('scicrunch', 'perl22(query)'),  # for test2.scicrunch.org
    )
    return self.process_response(raw)
def post(self, url: str, data: List[dict]) -> List[dict]:
    """ Sends json data (plus the API key) to the database.

    :param url: endpoint to POST to.
    :param data: json-serializable payload. The caller's dict is no longer
        mutated: the API key is merged into a copy instead.
    :returns: unwrapped 'data' field of the server response.
    """
    # FIX: the original called data.update(...) and silently injected 'key'
    # into the caller's dict; merge into a shallow copy instead.
    payload = dict(data)
    payload['key'] = self.api_key
    response = requests.post(
        url,
        data = json.dumps(payload),
        headers = {'Content-type': 'application/json'},
        auth = ('scicrunch', 'perl22(query)') # for test2.scicrunch.org
    )
    output = self.process_response(response)
    return output
def fix_ilx(self, ilx_id: str) -> str:
    """ Normalizes an ILX identifier to the lowercase/underscore form the
    database accepts (e.g. ILX:123 -> ilx_123). """
    # FIXME probably .rsplit('/', 1) is the more correct version of this
    # and because this is nominally a 'private' interface these should be
    bare = ilx_id.replace('http://uri.interlex.org/base/', '')
    if bare[:4] not in ['TMP:', 'tmp_', 'ILX:', 'ilx_']:
        raise ValueError(
            'Need to provide ilx ID with format ilx_# or ILX:# for given ID ' + bare)
    return bare.replace('ILX:', 'ilx_').replace('TMP:', 'tmp_')
def process_superclass(self, entity: List[dict]) -> List[dict]:
    """ Swaps the entity's superclass ILX ID for the server-side record id. """
    sup = entity.pop('superclass')
    label = entity['label']
    if not sup.get('ilx_id'):
        raise self.SuperClassDoesNotExistError(
            f'Superclass not given an interlex ID for label: {label}')
    sup_record = self.get_entity(sup['ilx_id'])
    if not sup_record['id']:
        raise self.SuperClassDoesNotExistError(
            'Superclass ILX ID: ' + sup['ilx_id'] + ' does not exist in SciCrunch')
    # BUG: only excepts superclass_tid
    entity['superclasses'] = [{'superclass_tid': sup_record['id']}]
    return entity
def process_synonyms(self, entity: List[dict]) -> List[dict]:
    """ Validates that each synonym dict holds exactly one 'literal' key. """
    label = entity['label']
    for syn in entity['synonyms']:
        # these are internal errors and users should never see them
        if 'literal' not in syn:
            raise ValueError(f'Synonym not given a literal for label: {label}')
        if len(syn) > 1:
            raise ValueError(f'Too many keys in synonym for label: {label}')
    return entity
def process_existing_ids(self, entity: List[dict]) -> List[dict]:
    """ Validates that every existing_id dict has exactly 'curie' and 'iri'. """
    label = entity['label']
    for ex_id in entity['existing_ids']:
        has_required = 'curie' in ex_id and 'iri' in ex_id
        if not has_required:
            raise ValueError(
                f'Missing needing key(s) in existing_ids for label: {label}')
        if len(ex_id) > 2:
            raise ValueError(
                f'Extra keys not recognized in existing_ids for label: {label}')
    return entity
def crude_search_scicrunch_via_label(self, label: str) -> dict:
    """ Fuzzy server-side search; returns anything similar in any category. """
    search_url = self.base_url + 'term/search/{term}?key={api_key}'.format(
        term=label, api_key=self.api_key)
    return self.get(search_url)
def check_scicrunch_for_label(self, label: str) -> dict:
    """ Returns the full record for `label` iff it was created by this user.

    There can be multiple entities with the same label in InterLex, but at
    most one should belong to your user id, so labels duplicated by other
    users do not count as yours.

    :param label: entity label to search for.
    :returns: complete entity data when label AND user id match, else {}.
    """
    for crude_match in self.crude_search_scicrunch_via_label(label):
        # Compare labels case- and whitespace-insensitively.
        if crude_match['label'].lower().strip() == label.lower().strip():
            complete_data_of_crude_match = self.get_entity(crude_match['ilx'])
            # FIX: dropped the unused local `crude_match_label`.
            # If label was created by you
            if str(self.user_id) == str(complete_data_of_crude_match['uid']):
                return complete_data_of_crude_match # You created the entity already
    # No label AND user id match
    return {}
def get_entity(self, ilx_id: str) -> dict:
    """ Pulls full metadata (minus annotations/relationships) for an ILX ID. """
    normalized = self.fix_ilx(ilx_id)
    lookup_url = self.base_url + "ilx/search/identifier/{identifier}?key={api_key}".format(
        identifier=normalized, api_key=self.api_key)
    return self.get(lookup_url)
def add_entity(
        self,
        label: str,
        type: str,
        definition: str = None,
        comment: str = None,
        superclass: str = None,
        synonyms: list = None) -> dict:
    """ Creates an entity and returns a flat summary of what the server stored.

    :param label: entity name (required).
    :param type: one of term, cde, fde, pde, annotation, relationship (required).
    :param definition: entity definition text.
    :param comment: footnote about the entity or its data.
    :param superclass: ILX ID of the parent entity.
    :param synonyms: list of synonym strings.
    :returns: dict echoing the inputs plus server-assigned ilx/iri/curie.
    """
    # Snapshot of the caller's truthy arguments; used to shape the output below.
    template_entity_input = {k:v for k, v in locals().items() if k != 'self' and v}
    if template_entity_input.get('superclass'):
        template_entity_input['superclass'] = self.fix_ilx(template_entity_input['superclass'])
    if not label:
        raise self.NoLabelError('Entity needs a label')
    if not type:
        raise self.NoTypeError('Entity needs a type')
    # Build the nested structure add_raw_entity expects.
    entity_input = {
        'label': label,
        'type': type,
    }
    if definition:
        entity_input['definition'] = definition
    if comment:
        entity_input['comment'] = comment
    if superclass:
        entity_input['superclass'] = {'ilx_id':self.fix_ilx(superclass)}
    if synonyms:
        entity_input['synonyms'] = [{'literal': syn} for syn in synonyms]
    raw_entity_outout = self.add_raw_entity(entity_input)
    # Sanity check -> output same as input, but filled with response data
    entity_output = {}
    # Pick the ilx_-based (iri, curie) pair out of the server's existing_ids.
    ics = [(e['iri'], e['curie'])
           for e in raw_entity_outout['existing_ids']]
    entity_output['iri'], entity_output['curie'] = sorted((i, c)
                                                          for i, c in ics
                                                          if 'ilx_' in i)[0]
    ### FOR NEW BETA. Old can have 'ilx_' in the ids ###
    if 'tmp' in raw_entity_outout['ilx']:
        _id = raw_entity_outout['ilx'].split('_')[-1]
        entity_output['iri'] = 'http://uri.interlex.org/base/tmp_' + _id
        entity_output['curie'] = 'TMP:' + _id
    # Echo the caller's fields back, normalized from the raw server record.
    for key, value in template_entity_input.items():
        if key == 'superclass':
            entity_output[key] = raw_entity_outout['superclasses'][0]['ilx']
        elif key == 'synonyms':
            entity_output[key] = [syn['literal']
                                  for syn in raw_entity_outout['synonyms']]
        else:
            entity_output[key] = str(raw_entity_outout[key])
    # skip this for now, I check it downstream be cause I'm paranoid, but in this client it is
    # safe to assume that the value given will be the value returned if there is a return at all
    # it also isn't that they match exactly, because some new values (e.g. iri and curie) are expected
    #if entity_output != template_entity_input:
        # DEBUG: helps see what's wrong; might want to make a clean version of this
        # for key, value in template_entity_input.items():
        #     if template_entity_input[key] != entity_output[key]:
        #         print(template_entity_input[key], entity_output[key])
        #raise self.BadResponseError('The server did not return proper data!')
    # Expand bare ilx fragments into full IRIs for the caller.
    if entity_output.get('superclass'):
        entity_output['superclass'] = self.ilx_base_url + entity_output['superclass']
    entity_output['ilx'] = self.ilx_base_url + raw_entity_outout['ilx']
    return entity_output
def add_raw_entity(self, entity: dict) -> dict:
    """ Adds entity if it does not already exist under your user ID.

    Need to provide a list of dictionaries that have at least the key/values
    for label and type. If given a key, the values provided must be in the
    format shown in order for the server to accept them. You can input
    multiple synonyms, or existing_ids.

    Entity type can be any of the following: term, pde, fde, cde, annotation, or relationship

    Options Template:
        entity = {
            'label': '',
            'type': '',
            'definition': '',
            'comment': '',
            'superclass': {
                'ilx_id': ''
            },
            'synonyms': [
                {
                    'literal': ''
                },
            ],
            'existing_ids': [
                {
                    'iri': '',
                    'curie': '',
                },
            ],
        }

    Minimum Needed:
        entity = {
            'label': '',
            'type': '',
        }

    Example:
        entity = {
            'label': 'brain',
            'type': 'pde',
            'definition': 'Part of the central nervous system',
            'comment': 'Cannot live without it',
            'superclass': {
                'ilx_id': 'ilx_0108124', # ILX ID for Organ
            },
            'synonyms': [
                {
                    'literal': 'Encephalon'
                },
                {
                    'literal': 'Cerebro'
                },
            ],
            'existing_ids': [
                {
                    'iri': 'http://uri.neuinfo.org/nif/nifstd/birnlex_796',
                    'curie': 'BIRNLEX:796',
                },
            ],
        }

    :raises MissingKeyError: label or type missing.
    :raises IncorrectKeyError: unrecognized key supplied.
    :raises Error: server rejected the entity for any other reason.
    """
    needed_in_entity = set([
        'label',
        'type',
    ])
    options_in_entity = set([
        'label',
        'type',
        'definition',
        'comment',
        'superclass',
        'synonyms',
        'existing_ids'
    ])
    prime_entity_url = self.base_url + 'ilx/add'
    add_entity_url = self.base_url + 'term/add'
    ### Checking if key/value format is correct ###
    # Seeing if you are missing a needed key
    if (set(entity) & needed_in_entity) != needed_in_entity:
        raise self.MissingKeyError(
            'You need key(s): '+ str(needed_in_entity - set(entity)))
    # Seeing if you have other options not included in the description
    elif (set(entity) | options_in_entity) != options_in_entity:
        raise self.IncorrectKeyError(
            'Unexpected key(s): ' + str(set(entity) - options_in_entity))
    entity['type'] = entity['type'].lower() # BUG: server only takes lowercase
    if entity['type'] not in ['term', 'relationship', 'annotation', 'cde', 'fde', 'pde']:
        raise TypeError(
            'Entity should be one of the following: ' +
            'term, relationship, annotation, cde, fde, pde')
    if entity.get('superclass'):
        entity = self.process_superclass(entity)
    if entity.get('synonyms'):
        entity = self.process_synonyms(entity)
    if entity.get('existing_ids'):
        entity = self.process_existing_ids(entity)
    entity['uid'] = self.user_id # BUG: php lacks uid update
    ### Adding entity to SciCrunch ###
    entity['term'] = entity.pop('label') # ilx/add nuance
    ilx_data = self.post(
        url = prime_entity_url,
        data = entity.copy(),
    ) # requesting spot in server for entity
    if ilx_data.get('ilx'):
        ilx_id = ilx_data['ilx']
    else:
        ilx_id = ilx_data['fragment'] # beta.scicrunch.org
    entity['label'] = entity.pop('term') # term/add nuance
    entity['ilx'] = ilx_id # need entity ilx_id to place entity in db
    output = self.post(
        url = add_entity_url,
        data = entity.copy(),
    ) # data represented in SciCrunch interface
    ### Checking if label already exisits ###
    if output.get('errormsg'):
        if 'already exists' in output['errormsg'].lower():
            prexisting_data = self.check_scicrunch_for_label(entity['label'])
            if prexisting_data:
                print(
                    'You already added entity', entity['label'],
                    'with ILX ID:', prexisting_data['ilx'])
                return prexisting_data
            # FIX: these exceptions were previously constructed but never
            # raised, so error responses fell through to the lookup below
            # and crashed on a missing 'ilx' key.
            raise self.Error(output) # FIXME what is the correct error here?
        raise self.Error(output) # FIXME what is the correct error here?
    # BUG: server output incomplete compared to search via ilx ids
    output = self.get_entity(output['ilx'])
    return output
def get_annotation_via_tid(self, tid: str) -> dict:
    """ Fetches all annotations anchored to the entity with server id `tid`. """
    anno_url = self.base_url + 'term/get-annotations/{tid}?key={api_key}'.format(
        tid=tid, api_key=self.api_key)
    return self.get(anno_url)
def add_annotation(
        self,
        term_ilx_id: str,
        annotation_type_ilx_id: str,
        annotation_value: str) -> dict:
    """ Adding an annotation value to a prexisting entity

    An annotation exists as 3 different parts:
        1. entity with type term, cde, fde, or pde
        2. entity with type annotation
        3. string value of the annotation

    Example:
        annotation = {
            'term_ilx_id': 'ilx_0101431', # brain ILX ID
            'annotation_type_ilx_id': 'ilx_0381360', # hasDbXref ILX ID
            'annotation_value': 'http://neurolex.org/wiki/birnlex_796',
        }

    :returns: the server's annotation record (the existing one if duplicate).
    """
    url = self.base_url + 'term/add-annotation'
    # NOTE(review): exit() kills the whole process on a bad ID; raising
    # EntityDoesNotExistError would be friendlier for library callers.
    term_data = self.get_entity(term_ilx_id)
    if not term_data['id']:
        exit(
            'term_ilx_id: ' + term_ilx_id + ' does not exist'
        )
    anno_data = self.get_entity(annotation_type_ilx_id)
    if not anno_data['id']:
        exit(
            'annotation_type_ilx_id: ' + annotation_type_ilx_id +
            ' does not exist'
        )
    # Versions must be echoed back so the server can detect stale edits.
    data = {
        'tid': term_data['id'],
        'annotation_tid': anno_data['id'],
        'value': annotation_value,
        'term_version': term_data['version'],
        'annotation_term_version': anno_data['version'],
        'orig_uid': self.user_id, # BUG: php lacks orig_uid update
    }
    output = self.post(
        url = url,
        data = data,
    )
    ### If already exists, we return the actual annotation properly ###
    if output.get('errormsg'):
        if 'already exists' in output['errormsg'].lower():
            term_annotations = self.get_annotation_via_tid(term_data['id'])
            for term_annotation in term_annotations:
                if str(term_annotation['annotation_tid']) == str(anno_data['id']):
                    if term_annotation['value'] == data['value']:
                        print(
                            'Annotation: [' + term_data['label'] + ' -> ' + anno_data['label'] +
                            ' -> ' + data['value'] + '], already exists.'
                        )
                        return term_annotation
            exit(output)
        exit(output)
    return output
def delete_annotation(
        self,
        term_ilx_id: str,
        annotation_type_ilx_id: str,
        annotation_value: str) -> dict:
    """ Deletes the annotation matching (entity, annotation type, value), if present.

    The delete is performed by blanking the matched annotation record
    through the term/edit-annotation endpoint.

    :returns: server response dict, or None when no matching annotation exists.
    """
    # NOTE(review): exit() kills the process on a bad ID; an exception
    # would be friendlier for library callers.
    term_data = self.get_entity(term_ilx_id)
    if not term_data['id']:
        exit(
            'term_ilx_id: ' + term_ilx_id + ' does not exist'
        )
    anno_data = self.get_entity(annotation_type_ilx_id)
    if not anno_data['id']:
        exit(
            'annotation_type_ilx_id: ' + annotation_type_ilx_id +
            ' does not exist'
        )
    # Find the concrete annotation row whose (tid, annotation_tid, value) all match.
    entity_annotations = self.get_annotation_via_tid(term_data['id'])
    annotation_id = ''
    for annotation in entity_annotations:
        if str(annotation['tid']) == str(term_data['id']):
            if str(annotation['annotation_tid']) == str(anno_data['id']):
                if str(annotation['value']) == str(annotation_value):
                    annotation_id = annotation['id']
                    break
    if not annotation_id:
        print('''WARNING: Annotation you wanted to delete does not exist ''')
        return None
    url = self.base_url + 'term/edit-annotation/{annotation_id}'.format(
        annotation_id = annotation_id
    )
    # Blank fields signal deletion to the edit endpoint.
    data = {
        'tid': ' ', # for delete
        'annotation_tid': ' ', # for delete
        'value': ' ', # for delete
        'term_version': ' ',
        'annotation_term_version': ' ',
    }
    output = self.post(
        url = url,
        data = data,
    )
    # check output
    return output
def get_relationship_via_tid(self, tid: str) -> dict:
    """ Fetches all relationships anchored to the entity with server id `tid`. """
    rel_url = self.base_url + 'term/get-relationships/{tid}?key={api_key}'.format(
        tid=tid, api_key=self.api_key)
    return self.get(rel_url)
def add_relationship(
        self,
        entity1_ilx: str,
        relationship_ilx: str,
        entity2_ilx: str) -> dict:
    """ Adds relationship connection in Interlex

    A relationship exists as 3 different parts:
        1. entity with type term, cde, fde, or pde
        2. entity with type relationship that connects entity1 to entity2
            -> Has its' own meta data, so no value needed
        3. entity with type term, cde, fde, or pde

    :returns: the server's relationship record (the existing one if duplicate).
    """
    url = self.base_url + 'term/add-relationship'
    # NOTE(review): exit() terminates the process on a bad ID; raising
    # EntityDoesNotExistError would be friendlier for library use.
    entity1_data = self.get_entity(entity1_ilx)
    if not entity1_data['id']:
        exit(
            'entity1_ilx: ' + entity1_ilx + ' does not exist'
        )
    relationship_data = self.get_entity(relationship_ilx)
    if not relationship_data['id']:
        exit(
            'relationship_ilx: ' + relationship_ilx + ' does not exist'
        )
    entity2_data = self.get_entity(entity2_ilx)
    if not entity2_data['id']:
        exit(
            'entity2_ilx: ' + entity2_ilx + ' does not exist'
        )
    # Versions must be echoed back so the server can detect stale edits.
    data = {
        'term1_id': entity1_data['id'],
        'relationship_tid': relationship_data['id'],
        'term2_id': entity2_data['id'],
        'term1_version': entity1_data['version'],
        'term2_version': entity2_data['version'],
        'relationship_term_version': relationship_data['version'],
        'orig_uid': self.user_id, # BUG: php lacks orig_uid update
    }
    output = self.post(
        url = url,
        data = data,
    )
    ### If already exists, we return the actual relationship properly ###
    if output.get('errormsg'):
        if 'already exists' in output['errormsg'].lower():
            term_relationships = self.get_relationship_via_tid(entity1_data['id'])
            for term_relationship in term_relationships:
                if str(term_relationship['term2_id']) == str(entity2_data['id']):
                    if term_relationship['relationship_tid'] == relationship_data['id']:
                        print(
                            'relationship: [' + entity1_data['label'] + ' -> ' +
                            relationship_data['label'] + ' -> ' + entity2_data['label'] +
                            '], already exists.'
                        )
                        return term_relationship
            exit(output)
        exit(output)
    return output
def delete_relationship(
        self,
        entity1_ilx: str,
        relationship_ilx: str,
        entity2_ilx: str) -> dict:
    """ Deletes a relationship connection in Interlex, if it exists.

    A relationship exists as 3 different parts:
        1. entity with type term, cde, fde, or pde
        2. entity with type relationship that connects entity1 to entity2
            -> Has its' own meta data, so no value needed
        3. entity with type term, cde, fde, or pde

    :returns: server response dict, or None when no matching relationship exists.
    """
    entity1_data = self.get_entity(entity1_ilx)
    if not entity1_data['id']:
        # FIX: previously concatenated the dict entity1_data into the string,
        # which raised TypeError instead of reporting the offending ID.
        exit(
            'entity1_ilx: ' + entity1_ilx + ' does not exist'
        )
    relationship_data = self.get_entity(relationship_ilx)
    if not relationship_data['id']:
        exit(
            'relationship_ilx: ' + relationship_ilx + ' does not exist'
        )
    entity2_data = self.get_entity(entity2_ilx)
    if not entity2_data['id']:
        # FIX: same str + dict TypeError as above; use the ID string.
        exit(
            'entity2_ilx: ' + entity2_ilx + ' does not exist'
        )
    # Blank ids signal deletion; versions are still required by the server.
    data = {
        'term1_id': ' ', #entity1_data['id'],
        'relationship_tid': ' ', #relationship_data['id'],
        'term2_id': ' ',#entity2_data['id'],
        'term1_version': entity1_data['version'],
        'term2_version': entity2_data['version'],
        'relationship_term_version': relationship_data['version'],
        'orig_uid': self.user_id, # BUG: php lacks orig_uid update
    }
    # Locate the concrete relationship row matching all three ids.
    entity_relationships = self.get_relationship_via_tid(entity1_data['id'])
    relationship_id = None
    for relationship in entity_relationships:
        if str(relationship['term1_id']) == str(entity1_data['id']):
            if str(relationship['term2_id']) == str(entity2_data['id']):
                if str(relationship['relationship_tid']) == str(relationship_data['id']):
                    relationship_id = relationship['id']
                    break
    if not relationship_id:
        # FIX: message said "Annotation"; this method deletes relationships.
        print('''WARNING: Relationship you wanted to delete does not exist ''')
        return None
    url = self.base_url + 'term/edit-relationship/{id}'.format(id=relationship_id)
    output = self.post(
        url = url,
        data = data,
    )
    return output
|
tgbugs/ontquery | ontquery/plugins/interlex_client.py | InterLexClient.add_annotation | python | def add_annotation(
self,
term_ilx_id: str,
annotation_type_ilx_id: str,
annotation_value: str) -> dict:
url = self.base_url + 'term/add-annotation'
term_data = self.get_entity(term_ilx_id)
if not term_data['id']:
exit(
'term_ilx_id: ' + term_ilx_id + ' does not exist'
)
anno_data = self.get_entity(annotation_type_ilx_id)
if not anno_data['id']:
exit(
'annotation_type_ilx_id: ' + annotation_type_ilx_id +
' does not exist'
)
data = {
'tid': term_data['id'],
'annotation_tid': anno_data['id'],
'value': annotation_value,
'term_version': term_data['version'],
'annotation_term_version': anno_data['version'],
'orig_uid': self.user_id, # BUG: php lacks orig_uid update
}
output = self.post(
url = url,
data = data,
)
### If already exists, we return the actual annotation properly ###
if output.get('errormsg'):
if 'already exists' in output['errormsg'].lower():
term_annotations = self.get_annotation_via_tid(term_data['id'])
for term_annotation in term_annotations:
if str(term_annotation['annotation_tid']) == str(anno_data['id']):
if term_annotation['value'] == data['value']:
print(
'Annotation: [' + term_data['label'] + ' -> ' + anno_data['label'] +
' -> ' + data['value'] + '], already exists.'
)
return term_annotation
exit(output)
exit(output)
return output | Adding an annotation value to a prexisting entity
An annotation exists as 3 different parts:
1. entity with type term, cde, fde, or pde
2. entity with type annotation
3. string value of the annotation
Example:
annotation = {
'term_ilx_id': 'ilx_0101431', # brain ILX ID
'annotation_type_ilx_id': 'ilx_0381360', # hasDbXref ILX ID
'annotation_value': 'http://neurolex.org/wiki/birnlex_796',
} | train | https://github.com/tgbugs/ontquery/blob/bcf4863cb2bf221afe2b093c5dc7da1377300041/ontquery/plugins/interlex_client.py#L527-L589 | [
"def post(self, url: str, data: List[dict]) -> List[dict]:\n \"\"\" Gives data to database \"\"\"\n data.update({\n 'key': self.api_key,\n })\n response = requests.post(\n url,\n data = json.dumps(data),\n headers = {'Content-type': 'application/json'},\n auth = ('scicrunch', 'perl22(query)') # for test2.scicrunch.org\n )\n output = self.process_response(response)\n return output\n",
"def get_entity(self, ilx_id: str) -> dict:\n \"\"\" Gets full meta data (expect their annotations and relationships) from is ILX ID \"\"\"\n ilx_id = self.fix_ilx(ilx_id)\n url = self.base_url + \"ilx/search/identifier/{identifier}?key={api_key}\".format(\n identifier = ilx_id,\n api_key = self.api_key,\n )\n return self.get(url)\n",
"def get_annotation_via_tid(self, tid: str) -> dict:\n \"\"\" Gets annotation via anchored entity id \"\"\"\n url = self.base_url + 'term/get-annotations/{tid}?key={api_key}'.format(\n tid = tid,\n api_key = self.api_key,\n )\n return self.get(url)\n"
] | class InterLexClient:
""" Connects to SciCrunch via its' api endpoints
Purpose is to allow external curators to add entities and annotations to those entities.
Functions To Use:
add_entity
add_annotation
Notes On Time Complexity:
Function add_entity, if added an entity successfully, will hit a least 5 endpoints. This
may cause it to take a few seconds to load each entity into SciCrunch. Function
add_annotation is a little more forgiving with it only hitting 3 minimum.
"""
# Exception hierarchy: every client failure derives from a single Error base,
# so callers can catch InterLexClient.Error to handle any of them at once.
class Error(Exception): pass
class SuperClassDoesNotExistError(Error):
    """ The superclass listed does not exist! """
class EntityDoesNotExistError(Error):
    """ The entity listed does not exist! """
class BadResponseError(Error): pass
class NoLabelError(Error): pass
class NoTypeError(Error): pass
class MissingKeyError(Error): pass
class IncorrectKeyError(Error): pass
class IncorrectAPIKeyError(Error): pass
# Production API root and the public base used to build ILX IRIs.
default_base_url = 'https://scicrunch.org/api/1/'
ilx_base_url = 'http://uri.interlex.org/base/'
def __init__(self, api_key: str, base_url: str = default_base_url):
    """ Stores credentials, validates the API key, and caches the user's id.

    :param api_key: SciCrunch API key used on every request.
    :param base_url: API root; defaults to the production SciCrunch endpoint.
    """
    self.api_key = api_key
    self.base_url = base_url
    # NOTE(review): the key check always targets default_base_url, not the
    # (possibly custom) base_url stored just above -- confirm intentional.
    user_info_url = self.default_base_url + 'user/info?key=' + self.api_key
    self.check_api_key(user_info_url)
    self.user_id = str(self.get(user_info_url)['id'])
def check_api_key(self, url):
    """ Hits the user-info endpoint and fails fast on a rejected key. """
    resp = requests.get(
        url,
        headers={'Content-type': 'application/json'},
        auth=('scicrunch', 'perl22(query)'),  # for test2.scicrunch.org
    )
    key_accepted = resp.status_code in [200, 201]
    if not key_accepted:  # Safety catch.
        raise self.IncorrectAPIKeyError('api_key given is incorrect.')
def process_response(self, response: requests.models.Response) -> dict:
    """ Validates the status code and unwraps the server's json payload. """
    status = response.status_code
    try:
        body = response.json()
    except json.JSONDecodeError:  # Server is having a bad day and crashed.
        raise self.BadResponseError(
            'Json not returned with status code [' + str(status) + ']')
    # 400s carry a usable error payload; hand it back for inspection.
    if status == 400:
        return body
    if status not in [200, 201]:  # Safety catch.
        raise self.BadResponseError(
            str(body) + ': with status code [' + str(status) +
            '] and params:' + str(body))
    return body['data']
def get(self, url: str) -> List[dict]:
    """ Fetches and unwraps data from the given API url. """
    raw = requests.get(
        url,
        headers={'Content-type': 'application/json'},
        auth=('scicrunch', 'perl22(query)'),  # for test2.scicrunch.org
    )
    return self.process_response(raw)
def post(self, url: str, data: List[dict]) -> List[dict]:
    """ Sends json data (plus the API key) to the database.

    :param url: endpoint to POST to.
    :param data: json-serializable payload. The caller's dict is no longer
        mutated: the API key is merged into a copy instead.
    :returns: unwrapped 'data' field of the server response.
    """
    # FIX: the original called data.update(...) and silently injected 'key'
    # into the caller's dict; merge into a shallow copy instead.
    payload = dict(data)
    payload['key'] = self.api_key
    response = requests.post(
        url,
        data = json.dumps(payload),
        headers = {'Content-type': 'application/json'},
        auth = ('scicrunch', 'perl22(query)') # for test2.scicrunch.org
    )
    output = self.process_response(response)
    return output
def fix_ilx(self, ilx_id: str) -> str:
    """ Normalizes an ILX identifier to the lowercase/underscore form the
    database accepts (e.g. ILX:123 -> ilx_123). """
    # FIXME probably .rsplit('/', 1) is the more correct version of this
    # and because this is nominally a 'private' interface these should be
    bare = ilx_id.replace('http://uri.interlex.org/base/', '')
    if bare[:4] not in ['TMP:', 'tmp_', 'ILX:', 'ilx_']:
        raise ValueError(
            'Need to provide ilx ID with format ilx_# or ILX:# for given ID ' + bare)
    return bare.replace('ILX:', 'ilx_').replace('TMP:', 'tmp_')
def process_superclass(self, entity: List[dict]) -> List[dict]:
    """ Swaps the entity's superclass ILX ID for the server-side record id. """
    sup = entity.pop('superclass')
    label = entity['label']
    if not sup.get('ilx_id'):
        raise self.SuperClassDoesNotExistError(
            f'Superclass not given an interlex ID for label: {label}')
    sup_record = self.get_entity(sup['ilx_id'])
    if not sup_record['id']:
        raise self.SuperClassDoesNotExistError(
            'Superclass ILX ID: ' + sup['ilx_id'] + ' does not exist in SciCrunch')
    # BUG: only excepts superclass_tid
    entity['superclasses'] = [{'superclass_tid': sup_record['id']}]
    return entity
def process_synonyms(self, entity: List[dict]) -> List[dict]:
    """ Validates that each synonym dict holds exactly one 'literal' key. """
    label = entity['label']
    for syn in entity['synonyms']:
        # these are internal errors and users should never see them
        if 'literal' not in syn:
            raise ValueError(f'Synonym not given a literal for label: {label}')
        if len(syn) > 1:
            raise ValueError(f'Too many keys in synonym for label: {label}')
    return entity
def process_existing_ids(self, entity: List[dict]) -> List[dict]:
    """ Validates that every existing_id dict has exactly 'curie' and 'iri'. """
    label = entity['label']
    for ex_id in entity['existing_ids']:
        has_required = 'curie' in ex_id and 'iri' in ex_id
        if not has_required:
            raise ValueError(
                f'Missing needing key(s) in existing_ids for label: {label}')
        if len(ex_id) > 2:
            raise ValueError(
                f'Extra keys not recognized in existing_ids for label: {label}')
    return entity
def crude_search_scicrunch_via_label(self, label: str) -> dict:
    """ Fuzzy server-side search; returns anything similar in any category. """
    search_url = self.base_url + 'term/search/{term}?key={api_key}'.format(
        term=label, api_key=self.api_key)
    return self.get(search_url)
def check_scicrunch_for_label(self, label: str) -> dict:
    """ Returns the full record for `label` iff it was created by this user.

    There can be multiple entities with the same label in InterLex, but at
    most one should belong to your user id, so labels duplicated by other
    users do not count as yours.

    :param label: entity label to search for.
    :returns: complete entity data when label AND user id match, else {}.
    """
    for crude_match in self.crude_search_scicrunch_via_label(label):
        # Compare labels case- and whitespace-insensitively.
        if crude_match['label'].lower().strip() == label.lower().strip():
            complete_data_of_crude_match = self.get_entity(crude_match['ilx'])
            # FIX: dropped the unused local `crude_match_label`.
            # If label was created by you
            if str(self.user_id) == str(complete_data_of_crude_match['uid']):
                return complete_data_of_crude_match # You created the entity already
    # No label AND user id match
    return {}
def get_entity(self, ilx_id: str) -> dict:
    """ Pulls full metadata (minus annotations/relationships) for an ILX ID. """
    normalized = self.fix_ilx(ilx_id)
    lookup_url = self.base_url + "ilx/search/identifier/{identifier}?key={api_key}".format(
        identifier=normalized, api_key=self.api_key)
    return self.get(lookup_url)
def add_entity(
        self,
        label: str,
        type: str,
        definition: str = None,
        comment: str = None,
        superclass: str = None,
        synonyms: list = None) -> dict:
    """ Creates an entity and returns a flat summary of what the server stored.

    :param label: entity name (required).
    :param type: one of term, cde, fde, pde, annotation, relationship (required).
    :param definition: entity definition text.
    :param comment: footnote about the entity or its data.
    :param superclass: ILX ID of the parent entity.
    :param synonyms: list of synonym strings.
    :returns: dict echoing the inputs plus server-assigned ilx/iri/curie.
    """
    # Snapshot of the caller's truthy arguments; used to shape the output below.
    template_entity_input = {k:v for k, v in locals().items() if k != 'self' and v}
    if template_entity_input.get('superclass'):
        template_entity_input['superclass'] = self.fix_ilx(template_entity_input['superclass'])
    if not label:
        raise self.NoLabelError('Entity needs a label')
    if not type:
        raise self.NoTypeError('Entity needs a type')
    # Build the nested structure add_raw_entity expects.
    entity_input = {
        'label': label,
        'type': type,
    }
    if definition:
        entity_input['definition'] = definition
    if comment:
        entity_input['comment'] = comment
    if superclass:
        entity_input['superclass'] = {'ilx_id':self.fix_ilx(superclass)}
    if synonyms:
        entity_input['synonyms'] = [{'literal': syn} for syn in synonyms]
    raw_entity_outout = self.add_raw_entity(entity_input)
    # Sanity check -> output same as input, but filled with response data
    entity_output = {}
    # Pick the ilx_-based (iri, curie) pair out of the server's existing_ids.
    ics = [(e['iri'], e['curie'])
           for e in raw_entity_outout['existing_ids']]
    entity_output['iri'], entity_output['curie'] = sorted((i, c)
                                                          for i, c in ics
                                                          if 'ilx_' in i)[0]
    ### FOR NEW BETA. Old can have 'ilx_' in the ids ###
    if 'tmp' in raw_entity_outout['ilx']:
        _id = raw_entity_outout['ilx'].split('_')[-1]
        entity_output['iri'] = 'http://uri.interlex.org/base/tmp_' + _id
        entity_output['curie'] = 'TMP:' + _id
    # Echo the caller's fields back, normalized from the raw server record.
    for key, value in template_entity_input.items():
        if key == 'superclass':
            entity_output[key] = raw_entity_outout['superclasses'][0]['ilx']
        elif key == 'synonyms':
            entity_output[key] = [syn['literal']
                                  for syn in raw_entity_outout['synonyms']]
        else:
            entity_output[key] = str(raw_entity_outout[key])
    # skip this for now, I check it downstream be cause I'm paranoid, but in this client it is
    # safe to assume that the value given will be the value returned if there is a return at all
    # it also isn't that they match exactly, because some new values (e.g. iri and curie) are expected
    #if entity_output != template_entity_input:
        # DEBUG: helps see what's wrong; might want to make a clean version of this
        # for key, value in template_entity_input.items():
        #     if template_entity_input[key] != entity_output[key]:
        #         print(template_entity_input[key], entity_output[key])
        #raise self.BadResponseError('The server did not return proper data!')
    # Expand bare ilx fragments into full IRIs for the caller.
    if entity_output.get('superclass'):
        entity_output['superclass'] = self.ilx_base_url + entity_output['superclass']
    entity_output['ilx'] = self.ilx_base_url + raw_entity_outout['ilx']
    return entity_output
def add_raw_entity(self, entity: dict) -> dict:
    """ Adds entity if it does not already exist under your user ID.

    Need to provide a list of dictionaries that have at least the key/values
    for label and type. If given a key, the values provided must be in the
    format shown in order for the server to accept them. You can input
    multiple synonyms, or existing_ids.

    Entity type can be any of the following: term, pde, fde, cde, annotation, or relationship

    Options Template:
        entity = {
            'label': '',
            'type': '',
            'definition': '',
            'comment': '',
            'superclass': {
                'ilx_id': ''
            },
            'synonyms': [
                {
                    'literal': ''
                },
            ],
            'existing_ids': [
                {
                    'iri': '',
                    'curie': '',
                },
            ],
        }

    Minimum Needed:
        entity = {
            'label': '',
            'type': '',
        }

    Example:
        entity = {
            'label': 'brain',
            'type': 'pde',
            'definition': 'Part of the central nervous system',
            'comment': 'Cannot live without it',
            'superclass': {
                'ilx_id': 'ilx_0108124', # ILX ID for Organ
            },
            'synonyms': [
                {
                    'literal': 'Encephalon'
                },
                {
                    'literal': 'Cerebro'
                },
            ],
            'existing_ids': [
                {
                    'iri': 'http://uri.neuinfo.org/nif/nifstd/birnlex_796',
                    'curie': 'BIRNLEX:796',
                },
            ],
        }

    :raises MissingKeyError: label or type missing.
    :raises IncorrectKeyError: unrecognized key supplied.
    :raises Error: server rejected the entity for any other reason.
    """
    needed_in_entity = set([
        'label',
        'type',
    ])
    options_in_entity = set([
        'label',
        'type',
        'definition',
        'comment',
        'superclass',
        'synonyms',
        'existing_ids'
    ])
    prime_entity_url = self.base_url + 'ilx/add'
    add_entity_url = self.base_url + 'term/add'
    ### Checking if key/value format is correct ###
    # Seeing if you are missing a needed key
    if (set(entity) & needed_in_entity) != needed_in_entity:
        raise self.MissingKeyError(
            'You need key(s): '+ str(needed_in_entity - set(entity)))
    # Seeing if you have other options not included in the description
    elif (set(entity) | options_in_entity) != options_in_entity:
        raise self.IncorrectKeyError(
            'Unexpected key(s): ' + str(set(entity) - options_in_entity))
    entity['type'] = entity['type'].lower() # BUG: server only takes lowercase
    if entity['type'] not in ['term', 'relationship', 'annotation', 'cde', 'fde', 'pde']:
        raise TypeError(
            'Entity should be one of the following: ' +
            'term, relationship, annotation, cde, fde, pde')
    if entity.get('superclass'):
        entity = self.process_superclass(entity)
    if entity.get('synonyms'):
        entity = self.process_synonyms(entity)
    if entity.get('existing_ids'):
        entity = self.process_existing_ids(entity)
    entity['uid'] = self.user_id # BUG: php lacks uid update
    ### Adding entity to SciCrunch ###
    entity['term'] = entity.pop('label') # ilx/add nuance
    ilx_data = self.post(
        url = prime_entity_url,
        data = entity.copy(),
    ) # requesting spot in server for entity
    if ilx_data.get('ilx'):
        ilx_id = ilx_data['ilx']
    else:
        ilx_id = ilx_data['fragment'] # beta.scicrunch.org
    entity['label'] = entity.pop('term') # term/add nuance
    entity['ilx'] = ilx_id # need entity ilx_id to place entity in db
    output = self.post(
        url = add_entity_url,
        data = entity.copy(),
    ) # data represented in SciCrunch interface
    ### Checking if label already exisits ###
    if output.get('errormsg'):
        if 'already exists' in output['errormsg'].lower():
            prexisting_data = self.check_scicrunch_for_label(entity['label'])
            if prexisting_data:
                print(
                    'You already added entity', entity['label'],
                    'with ILX ID:', prexisting_data['ilx'])
                return prexisting_data
            # FIX: these exceptions were previously constructed but never
            # raised, so error responses fell through to the lookup below
            # and crashed on a missing 'ilx' key.
            raise self.Error(output) # FIXME what is the correct error here?
        raise self.Error(output) # FIXME what is the correct error here?
    # BUG: server output incomplete compared to search via ilx ids
    output = self.get_entity(output['ilx'])
    return output
def update_entity(
self,
ilx_id: str,
label: str = None,
type: str = None,
definition: str = None,
comment: str = None,
superclass: str = None,
synonyms: list = None) -> dict:
""" Updates pre-existing entity as long as the api_key is from the account that created it
Args:
label: name of entity
type: entities type
Can be any of the following: term, cde, fde, pde, annotation, relationship
definition: entities definition
comment: a foot note regarding either the interpretation of the data or the data itself
superclass: entity is a sub-part of this entity
Example: Organ is a superclass to Brain
synonyms: entity synonyms
Returns:
Server response that is a nested dictionary format
"""
template_entity_input = {k:v for k, v in locals().copy().items() if k != 'self'}
if template_entity_input.get('superclass'):
template_entity_input['superclass'] = self.fix_ilx(template_entity_input['superclass'])
existing_entity = self.get_entity(ilx_id=ilx_id)
if not existing_entity['id']: # TODO: Need to make a proper ilx_id check error
raise self.EntityDoesNotExistError(
f'ilx_id provided {ilx_id} does not exist')
update_url = self.base_url + 'term/edit/{id}'.format(id=existing_entity['id'])
if label:
existing_entity['label'] = label
if type:
existing_entity['type'] = type
if definition:
existing_entity['definition'] = definition
if comment:
existing_entity['comment'] = comment
if superclass:
existing_entity['superclass'] = {'ilx_id': superclass}
existing_entity = self.process_superclass(existing_entity)
# If a match use old data, else append new synonym
if synonyms:
if existing_entity['synonyms']:
new_existing_synonyms = []
existing_synonyms = {syn['literal'].lower():syn for syn in existing_entity['synonyms']}
for synonym in synonyms:
existing_synonym = existing_synonyms.get(synonym.lower())
if not existing_synonym:
new_existing_synonyms.append({'literal': synonym})
else:
new_existing_synonyms.append(existing_synonym)
existing_entity['synonyms'] = new_existing_synonyms
# Just in case I need this...
# if synonyms_to_delete:
# if existing_entity['synonyms']:
# remaining_existing_synonyms = []
# existing_synonyms = {syn['literal'].lower():syn for syn in existing_entity['synonyms']}
# for synonym in synonyms:
# if existing_synonyms.get(synonym.lower()):
# existing_synonyms.pop(synonym.lower())
# else:
# print('WARNING: synonym you wanted to delete', synonym, 'does not exist')
# existing_entity['synonyms'] = list(existing_synonyms.values())
response = self.post(
url = update_url,
data = existing_entity,
)
# BUG: server response is bad and needs to actually search again to get proper format
raw_entity_outout = self.get_entity(response['ilx'])
entity_output = {}
ics = [(e['iri'], e['curie'])
for e in raw_entity_outout['existing_ids']]
entity_output['iri'], entity_output['curie'] = sorted((i, c)
for i, c in ics
if 'ilx_' in i)[0]
### FOR NEW BETA. Old can have 'ilx_' in the ids ###
if 'tmp' in raw_entity_outout['ilx']:
_id = raw_entity_outout['ilx'].split('_')[-1]
entity_output['iri'] = 'http://uri.interlex.org/base/tmp_' + _id
entity_output['curie'] = 'TMP:' + _id
print(template_entity_input)
for key, value in template_entity_input.items():
if key == 'superclass':
if raw_entity_outout.get('superclasses'):
entity_output[key] = raw_entity_outout['superclasses'][0]['ilx']
elif key == 'synonyms':
entity_output[key] = [syn['literal']
for syn in raw_entity_outout['synonyms']]
elif key == 'ilx_id':
pass
else:
entity_output[key] = str(raw_entity_outout[key])
if entity_output.get('superclass'):
entity_output['superclass'] = self.ilx_base_url + entity_output['superclass']
entity_output['ilx'] = self.ilx_base_url + raw_entity_outout['ilx']
return entity_output
def get_annotation_via_tid(self, tid: str) -> dict:
""" Gets annotation via anchored entity id """
url = self.base_url + 'term/get-annotations/{tid}?key={api_key}'.format(
tid = tid,
api_key = self.api_key,
)
return self.get(url)
def delete_annotation(
self,
term_ilx_id: str,
annotation_type_ilx_id: str,
annotation_value: str) -> dict:
""" If annotation doesnt exist, add it
"""
term_data = self.get_entity(term_ilx_id)
if not term_data['id']:
exit(
'term_ilx_id: ' + term_ilx_id + ' does not exist'
)
anno_data = self.get_entity(annotation_type_ilx_id)
if not anno_data['id']:
exit(
'annotation_type_ilx_id: ' + annotation_type_ilx_id +
' does not exist'
)
entity_annotations = self.get_annotation_via_tid(term_data['id'])
annotation_id = ''
for annotation in entity_annotations:
if str(annotation['tid']) == str(term_data['id']):
if str(annotation['annotation_tid']) == str(anno_data['id']):
if str(annotation['value']) == str(annotation_value):
annotation_id = annotation['id']
break
if not annotation_id:
print('''WARNING: Annotation you wanted to delete does not exist ''')
return None
url = self.base_url + 'term/edit-annotation/{annotation_id}'.format(
annotation_id = annotation_id
)
data = {
'tid': ' ', # for delete
'annotation_tid': ' ', # for delete
'value': ' ', # for delete
'term_version': ' ',
'annotation_term_version': ' ',
}
output = self.post(
url = url,
data = data,
)
# check output
return output
def get_relationship_via_tid(self, tid: str) -> dict:
url = self.base_url + 'term/get-relationships/{tid}?key={api_key}'.format(
tid = tid,
api_key = self.api_key,
)
return self.get(url)
def add_relationship(
self,
entity1_ilx: str,
relationship_ilx: str,
entity2_ilx: str) -> dict:
""" Adds relationship connection in Interlex
A relationship exists as 3 different parts:
1. entity with type term, cde, fde, or pde
2. entity with type relationship that connects entity1 to entity2
-> Has its' own meta data, so no value needed
3. entity with type term, cde, fde, or pde
"""
url = self.base_url + 'term/add-relationship'
entity1_data = self.get_entity(entity1_ilx)
if not entity1_data['id']:
exit(
'entity1_ilx: ' + entity1_ilx + ' does not exist'
)
relationship_data = self.get_entity(relationship_ilx)
if not relationship_data['id']:
exit(
'relationship_ilx: ' + relationship_ilx + ' does not exist'
)
entity2_data = self.get_entity(entity2_ilx)
if not entity2_data['id']:
exit(
'entity2_ilx: ' + entity2_ilx + ' does not exist'
)
data = {
'term1_id': entity1_data['id'],
'relationship_tid': relationship_data['id'],
'term2_id': entity2_data['id'],
'term1_version': entity1_data['version'],
'term2_version': entity2_data['version'],
'relationship_term_version': relationship_data['version'],
'orig_uid': self.user_id, # BUG: php lacks orig_uid update
}
output = self.post(
url = url,
data = data,
)
### If already exists, we return the actual relationship properly ###
if output.get('errormsg'):
if 'already exists' in output['errormsg'].lower():
term_relationships = self.get_relationship_via_tid(entity1_data['id'])
for term_relationship in term_relationships:
if str(term_relationship['term2_id']) == str(entity2_data['id']):
if term_relationship['relationship_tid'] == relationship_data['id']:
print(
'relationship: [' + entity1_data['label'] + ' -> ' +
relationship_data['label'] + ' -> ' + entity2_data['label'] +
'], already exists.'
)
return term_relationship
exit(output)
exit(output)
return output
def delete_relationship(
self,
entity1_ilx: str,
relationship_ilx: str,
entity2_ilx: str) -> dict:
""" Adds relationship connection in Interlex
A relationship exists as 3 different parts:
1. entity with type term, cde, fde, or pde
2. entity with type relationship that connects entity1 to entity2
-> Has its' own meta data, so no value needed
3. entity with type term, cde, fde, or pde
"""
entity1_data = self.get_entity(entity1_ilx)
if not entity1_data['id']:
exit(
'entity1_ilx: ' + entity1_data + ' does not exist'
)
relationship_data = self.get_entity(relationship_ilx)
if not relationship_data['id']:
exit(
'relationship_ilx: ' + relationship_ilx + ' does not exist'
)
entity2_data = self.get_entity(entity2_ilx)
if not entity2_data['id']:
exit(
'entity2_ilx: ' + entity2_data + ' does not exist'
)
data = {
'term1_id': ' ', #entity1_data['id'],
'relationship_tid': ' ', #relationship_data['id'],
'term2_id': ' ',#entity2_data['id'],
'term1_version': entity1_data['version'],
'term2_version': entity2_data['version'],
'relationship_term_version': relationship_data['version'],
'orig_uid': self.user_id, # BUG: php lacks orig_uid update
}
entity_relationships = self.get_relationship_via_tid(entity1_data['id'])
# TODO: parse through entity_relationships to see if we have a match; else print warning and return None
relationship_id = None
for relationship in entity_relationships:
if str(relationship['term1_id']) == str(entity1_data['id']):
if str(relationship['term2_id']) == str(entity2_data['id']):
if str(relationship['relationship_tid']) == str(relationship_data['id']):
relationship_id = relationship['id']
break
if not relationship_id:
print('''WARNING: Annotation you wanted to delete does not exist ''')
return None
url = self.base_url + 'term/edit-relationship/{id}'.format(id=relationship_id)
output = self.post(
url = url,
data = data,
)
return output
|
tgbugs/ontquery | ontquery/plugins/interlex_client.py | InterLexClient.delete_annotation | python | def delete_annotation(
self,
term_ilx_id: str,
annotation_type_ilx_id: str,
annotation_value: str) -> dict:
term_data = self.get_entity(term_ilx_id)
if not term_data['id']:
exit(
'term_ilx_id: ' + term_ilx_id + ' does not exist'
)
anno_data = self.get_entity(annotation_type_ilx_id)
if not anno_data['id']:
exit(
'annotation_type_ilx_id: ' + annotation_type_ilx_id +
' does not exist'
)
entity_annotations = self.get_annotation_via_tid(term_data['id'])
annotation_id = ''
for annotation in entity_annotations:
if str(annotation['tid']) == str(term_data['id']):
if str(annotation['annotation_tid']) == str(anno_data['id']):
if str(annotation['value']) == str(annotation_value):
annotation_id = annotation['id']
break
if not annotation_id:
print('''WARNING: Annotation you wanted to delete does not exist ''')
return None
url = self.base_url + 'term/edit-annotation/{annotation_id}'.format(
annotation_id = annotation_id
)
data = {
'tid': ' ', # for delete
'annotation_tid': ' ', # for delete
'value': ' ', # for delete
'term_version': ' ',
'annotation_term_version': ' ',
}
output = self.post(
url = url,
data = data,
)
# check output
return output | If annotation doesnt exist, add it | train | https://github.com/tgbugs/ontquery/blob/bcf4863cb2bf221afe2b093c5dc7da1377300041/ontquery/plugins/interlex_client.py#L591-L642 | [
"def get_entity(self, ilx_id: str) -> dict:\n \"\"\" Gets full meta data (expect their annotations and relationships) from is ILX ID \"\"\"\n ilx_id = self.fix_ilx(ilx_id)\n url = self.base_url + \"ilx/search/identifier/{identifier}?key={api_key}\".format(\n identifier = ilx_id,\n api_key = self.api_key,\n )\n return self.get(url)\n",
"def get_annotation_via_tid(self, tid: str) -> dict:\n \"\"\" Gets annotation via anchored entity id \"\"\"\n url = self.base_url + 'term/get-annotations/{tid}?key={api_key}'.format(\n tid = tid,\n api_key = self.api_key,\n )\n return self.get(url)\n"
] | class InterLexClient:
""" Connects to SciCrunch via its' api endpoints
Purpose is to allow external curators to add entities and annotations to those entities.
Functions To Use:
add_entity
add_annotation
Notes On Time Complexity:
Function add_entity, if added an entity successfully, will hit a least 5 endpoints. This
may cause it to take a few seconds to load each entity into SciCrunch. Function
add_annotation is a little more forgiving with it only hitting 3 minimum.
"""
class Error(Exception): pass
class SuperClassDoesNotExistError(Error):
""" The superclass listed does not exist! """
class EntityDoesNotExistError(Error):
""" The entity listed does not exist! """
class BadResponseError(Error): pass
class NoLabelError(Error): pass
class NoTypeError(Error): pass
class MissingKeyError(Error): pass
class IncorrectKeyError(Error): pass
class IncorrectAPIKeyError(Error): pass
default_base_url = 'https://scicrunch.org/api/1/'
ilx_base_url = 'http://uri.interlex.org/base/'
def __init__(self, api_key: str, base_url: str = default_base_url):
self.api_key = api_key
self.base_url = base_url
user_info_url = self.default_base_url + 'user/info?key=' + self.api_key
self.check_api_key(user_info_url)
self.user_id = str(self.get(user_info_url)['id'])
def check_api_key(self, url):
response = requests.get(
url,
headers = {'Content-type': 'application/json'},
auth = ('scicrunch', 'perl22(query)') # for test2.scicrunch.org
)
if response.status_code not in [200, 201]: # Safety catch.
raise self.IncorrectAPIKeyError('api_key given is incorrect.')
def process_response(self, response: requests.models.Response) -> dict:
""" Checks for correct data response and status codes """
try:
output = response.json()
except json.JSONDecodeError: # Server is having a bad day and crashed.
raise self.BadResponseError(
'Json not returned with status code [' + str(response.status_code) + ']')
if response.status_code == 400:
return output
if response.status_code not in [200, 201]: # Safety catch.
raise self.BadResponseError(
str(output) + ': with status code [' + str(response.status_code) +
'] and params:' + str(output))
return output['data']
def get(self, url: str) -> List[dict]:
""" Requests data from database """
response = requests.get(
url,
headers = {'Content-type': 'application/json'},
auth = ('scicrunch', 'perl22(query)') # for test2.scicrunch.org
)
output = self.process_response(response)
return output
def post(self, url: str, data: List[dict]) -> List[dict]:
""" Gives data to database """
data.update({
'key': self.api_key,
})
response = requests.post(
url,
data = json.dumps(data),
headers = {'Content-type': 'application/json'},
auth = ('scicrunch', 'perl22(query)') # for test2.scicrunch.org
)
output = self.process_response(response)
return output
def fix_ilx(self, ilx_id: str) -> str:
""" Database only excepts lower case and underscore version of ID """
# FIXME probably .rsplit('/', 1) is the more correct version of this
# and because this is nominally a 'private' interface these should be
ilx_id = ilx_id.replace('http://uri.interlex.org/base/', '')
if ilx_id[:4] not in ['TMP:', 'tmp_', 'ILX:', 'ilx_']:
raise ValueError(
'Need to provide ilx ID with format ilx_# or ILX:# for given ID ' + ilx_id)
return ilx_id.replace('ILX:', 'ilx_').replace('TMP:', 'tmp_')
def process_superclass(self, entity: List[dict]) -> List[dict]:
""" Replaces ILX ID with superclass ID """
superclass = entity.pop('superclass')
label = entity['label']
if not superclass.get('ilx_id'):
raise self.SuperClassDoesNotExistError(
f'Superclass not given an interlex ID for label: {label}')
superclass_data = self.get_entity(superclass['ilx_id'])
if not superclass_data['id']:
raise self.SuperClassDoesNotExistError(
'Superclass ILX ID: ' + superclass['ilx_id'] + ' does not exist in SciCrunch')
# BUG: only excepts superclass_tid
entity['superclasses'] = [{'superclass_tid': superclass_data['id']}]
return entity
def process_synonyms(self, entity: List[dict]) -> List[dict]:
""" Making sure key/value is in proper format for synonyms in entity """
label = entity['label']
for synonym in entity['synonyms']:
# these are internal errors and users should never see them
if 'literal' not in synonym:
raise ValueError(f'Synonym not given a literal for label: {label}')
elif len(synonym) > 1:
raise ValueError(f'Too many keys in synonym for label: {label}')
return entity
def process_existing_ids(self, entity: List[dict]) -> List[dict]:
""" Making sure key/value is in proper format for existing_ids in entity """
label = entity['label']
existing_ids = entity['existing_ids']
for existing_id in existing_ids:
if 'curie' not in existing_id or 'iri' not in existing_id:
raise ValueError(
f'Missing needing key(s) in existing_ids for label: {label}')
elif len(existing_id) > 2:
raise ValueError(
f'Extra keys not recognized in existing_ids for label: {label}')
return entity
def crude_search_scicrunch_via_label(self, label:str) -> dict:
""" Server returns anything that is simlar in any catagory """
url = self.base_url + 'term/search/{term}?key={api_key}'.format(
term = label,
api_key = self.api_key,
)
return self.get(url)
def check_scicrunch_for_label(self, label: str) -> dict:
""" Sees if label with your user ID already exists
There are can be multiples of the same label in interlex, but there should only be one
label with your user id. Therefore you can create labels if there already techniqually
exist, but not if you are the one to create it.
"""
list_of_crude_matches = self.crude_search_scicrunch_via_label(label)
for crude_match in list_of_crude_matches:
# If labels match
if crude_match['label'].lower().strip() == label.lower().strip():
complete_data_of_crude_match = self.get_entity(crude_match['ilx'])
crude_match_label = crude_match['label']
crude_match_user_id = complete_data_of_crude_match['uid']
# If label was created by you
if str(self.user_id) == str(crude_match_user_id):
return complete_data_of_crude_match # You created the entity already
# No label AND user id match
return {}
def get_entity(self, ilx_id: str) -> dict:
""" Gets full meta data (expect their annotations and relationships) from is ILX ID """
ilx_id = self.fix_ilx(ilx_id)
url = self.base_url + "ilx/search/identifier/{identifier}?key={api_key}".format(
identifier = ilx_id,
api_key = self.api_key,
)
return self.get(url)
def add_entity(
self,
label: str,
type: str,
definition: str = None,
comment: str = None,
superclass: str = None,
synonyms: list = None) -> dict:
template_entity_input = {k:v for k, v in locals().items() if k != 'self' and v}
if template_entity_input.get('superclass'):
template_entity_input['superclass'] = self.fix_ilx(template_entity_input['superclass'])
if not label:
raise self.NoLabelError('Entity needs a label')
if not type:
raise self.NoTypeError('Entity needs a type')
entity_input = {
'label': label,
'type': type,
}
if definition:
entity_input['definition'] = definition
if comment:
entity_input['comment'] = comment
if superclass:
entity_input['superclass'] = {'ilx_id':self.fix_ilx(superclass)}
if synonyms:
entity_input['synonyms'] = [{'literal': syn} for syn in synonyms]
raw_entity_outout = self.add_raw_entity(entity_input)
# Sanity check -> output same as input, but filled with response data
entity_output = {}
ics = [(e['iri'], e['curie'])
for e in raw_entity_outout['existing_ids']]
entity_output['iri'], entity_output['curie'] = sorted((i, c)
for i, c in ics
if 'ilx_' in i)[0]
### FOR NEW BETA. Old can have 'ilx_' in the ids ###
if 'tmp' in raw_entity_outout['ilx']:
_id = raw_entity_outout['ilx'].split('_')[-1]
entity_output['iri'] = 'http://uri.interlex.org/base/tmp_' + _id
entity_output['curie'] = 'TMP:' + _id
for key, value in template_entity_input.items():
if key == 'superclass':
entity_output[key] = raw_entity_outout['superclasses'][0]['ilx']
elif key == 'synonyms':
entity_output[key] = [syn['literal']
for syn in raw_entity_outout['synonyms']]
else:
entity_output[key] = str(raw_entity_outout[key])
# skip this for now, I check it downstream be cause I'm paranoid, but in this client it is
# safe to assume that the value given will be the value returned if there is a return at all
# it also isn't that they match exactly, because some new values (e.g. iri and curie) are expected
#if entity_output != template_entity_input:
# DEBUG: helps see what's wrong; might want to make a clean version of this
# for key, value in template_entity_input.items():
# if template_entity_input[key] != entity_output[key]:
# print(template_entity_input[key], entity_output[key])
#raise self.BadResponseError('The server did not return proper data!')
if entity_output.get('superclass'):
entity_output['superclass'] = self.ilx_base_url + entity_output['superclass']
entity_output['ilx'] = self.ilx_base_url + raw_entity_outout['ilx']
return entity_output
def add_raw_entity(self, entity: dict) -> dict:
""" Adds entity if it does not already exist under your user ID.
Need to provide a list of dictionaries that have at least the key/values
for label and type. If given a key, the values provided must be in the
format shown in order for the server to except them. You can input
multiple synonyms, or existing_ids.
Entity type can be any of the following: term, pde, fde, cde, annotation, or relationship
Options Template:
entity = {
'label': '',
'type': '',
'definition': '',
'comment': '',
'superclass': {
'ilx_id': ''
},
'synonyms': [
{
'literal': ''
},
],
'existing_ids': [
{
'iri': '',
'curie': '',
},
],
}
Minimum Needed:
entity = {
'label': '',
'type': '',
}
Example:
entity = {
'label': 'brain',
'type': 'pde',
'definition': 'Part of the central nervous system',
'comment': 'Cannot live without it',
'superclass': {
'ilx_id': 'ilx_0108124', # ILX ID for Organ
},
'synonyms': [
{
'literal': 'Encephalon'
},
{
'literal': 'Cerebro'
},
],
'existing_ids': [
{
'iri': 'http://uri.neuinfo.org/nif/nifstd/birnlex_796',
'curie': 'BIRNLEX:796',
},
],
}
"""
needed_in_entity = set([
'label',
'type',
])
options_in_entity = set([
'label',
'type',
'definition',
'comment',
'superclass',
'synonyms',
'existing_ids'
])
prime_entity_url = self.base_url + 'ilx/add'
add_entity_url = self.base_url + 'term/add'
### Checking if key/value format is correct ###
# Seeing if you are missing a needed key
if (set(entity) & needed_in_entity) != needed_in_entity:
raise self.MissingKeyError(
'You need key(s): '+ str(needed_in_entity - set(entity)))
# Seeing if you have other options not included in the description
elif (set(entity) | options_in_entity) != options_in_entity:
raise self.IncorrectKeyError(
'Unexpected key(s): ' + str(set(entity) - options_in_entity))
entity['type'] = entity['type'].lower() # BUG: server only takes lowercase
if entity['type'] not in ['term', 'relationship', 'annotation', 'cde', 'fde', 'pde']:
raise TypeError(
'Entity should be one of the following: ' +
'term, relationship, annotation, cde, fde, pde')
if entity.get('superclass'):
entity = self.process_superclass(entity)
if entity.get('synonyms'):
entity = self.process_synonyms(entity)
if entity.get('existing_ids'):
entity = self.process_existing_ids(entity)
entity['uid'] = self.user_id # BUG: php lacks uid update
### Adding entity to SciCrunch ###
entity['term'] = entity.pop('label') # ilx/add nuance
ilx_data = self.post(
url = prime_entity_url,
data = entity.copy(),
) # requesting spot in server for entity
if ilx_data.get('ilx'):
ilx_id = ilx_data['ilx']
else:
ilx_id = ilx_data['fragment'] # beta.scicrunch.org
entity['label'] = entity.pop('term') # term/add nuance
entity['ilx'] = ilx_id # need entity ilx_id to place entity in db
output = self.post(
url = add_entity_url,
data = entity.copy(),
) # data represented in SciCrunch interface
### Checking if label already exisits ###
if output.get('errormsg'):
if 'already exists' in output['errormsg'].lower():
prexisting_data = self.check_scicrunch_for_label(entity['label'])
if prexisting_data:
print(
'You already added entity', entity['label'],
'with ILX ID:', prexisting_data['ilx'])
return prexisting_data
self.Error(output) # FIXME what is the correct error here?
self.Error(output) # FIXME what is the correct error here?
# BUG: server output incomplete compared to search via ilx ids
output = self.get_entity(output['ilx'])
return output
def update_entity(
self,
ilx_id: str,
label: str = None,
type: str = None,
definition: str = None,
comment: str = None,
superclass: str = None,
synonyms: list = None) -> dict:
""" Updates pre-existing entity as long as the api_key is from the account that created it
Args:
label: name of entity
type: entities type
Can be any of the following: term, cde, fde, pde, annotation, relationship
definition: entities definition
comment: a foot note regarding either the interpretation of the data or the data itself
superclass: entity is a sub-part of this entity
Example: Organ is a superclass to Brain
synonyms: entity synonyms
Returns:
Server response that is a nested dictionary format
"""
template_entity_input = {k:v for k, v in locals().copy().items() if k != 'self'}
if template_entity_input.get('superclass'):
template_entity_input['superclass'] = self.fix_ilx(template_entity_input['superclass'])
existing_entity = self.get_entity(ilx_id=ilx_id)
if not existing_entity['id']: # TODO: Need to make a proper ilx_id check error
raise self.EntityDoesNotExistError(
f'ilx_id provided {ilx_id} does not exist')
update_url = self.base_url + 'term/edit/{id}'.format(id=existing_entity['id'])
if label:
existing_entity['label'] = label
if type:
existing_entity['type'] = type
if definition:
existing_entity['definition'] = definition
if comment:
existing_entity['comment'] = comment
if superclass:
existing_entity['superclass'] = {'ilx_id': superclass}
existing_entity = self.process_superclass(existing_entity)
# If a match use old data, else append new synonym
if synonyms:
if existing_entity['synonyms']:
new_existing_synonyms = []
existing_synonyms = {syn['literal'].lower():syn for syn in existing_entity['synonyms']}
for synonym in synonyms:
existing_synonym = existing_synonyms.get(synonym.lower())
if not existing_synonym:
new_existing_synonyms.append({'literal': synonym})
else:
new_existing_synonyms.append(existing_synonym)
existing_entity['synonyms'] = new_existing_synonyms
# Just in case I need this...
# if synonyms_to_delete:
# if existing_entity['synonyms']:
# remaining_existing_synonyms = []
# existing_synonyms = {syn['literal'].lower():syn for syn in existing_entity['synonyms']}
# for synonym in synonyms:
# if existing_synonyms.get(synonym.lower()):
# existing_synonyms.pop(synonym.lower())
# else:
# print('WARNING: synonym you wanted to delete', synonym, 'does not exist')
# existing_entity['synonyms'] = list(existing_synonyms.values())
response = self.post(
url = update_url,
data = existing_entity,
)
# BUG: server response is bad and needs to actually search again to get proper format
raw_entity_outout = self.get_entity(response['ilx'])
entity_output = {}
ics = [(e['iri'], e['curie'])
for e in raw_entity_outout['existing_ids']]
entity_output['iri'], entity_output['curie'] = sorted((i, c)
for i, c in ics
if 'ilx_' in i)[0]
### FOR NEW BETA. Old can have 'ilx_' in the ids ###
if 'tmp' in raw_entity_outout['ilx']:
_id = raw_entity_outout['ilx'].split('_')[-1]
entity_output['iri'] = 'http://uri.interlex.org/base/tmp_' + _id
entity_output['curie'] = 'TMP:' + _id
print(template_entity_input)
for key, value in template_entity_input.items():
if key == 'superclass':
if raw_entity_outout.get('superclasses'):
entity_output[key] = raw_entity_outout['superclasses'][0]['ilx']
elif key == 'synonyms':
entity_output[key] = [syn['literal']
for syn in raw_entity_outout['synonyms']]
elif key == 'ilx_id':
pass
else:
entity_output[key] = str(raw_entity_outout[key])
if entity_output.get('superclass'):
entity_output['superclass'] = self.ilx_base_url + entity_output['superclass']
entity_output['ilx'] = self.ilx_base_url + raw_entity_outout['ilx']
return entity_output
def get_annotation_via_tid(self, tid: str) -> dict:
""" Gets annotation via anchored entity id """
url = self.base_url + 'term/get-annotations/{tid}?key={api_key}'.format(
tid = tid,
api_key = self.api_key,
)
return self.get(url)
def add_annotation(
self,
term_ilx_id: str,
annotation_type_ilx_id: str,
annotation_value: str) -> dict:
""" Adding an annotation value to a prexisting entity
An annotation exists as 3 different parts:
1. entity with type term, cde, fde, or pde
2. entity with type annotation
3. string value of the annotation
Example:
annotation = {
'term_ilx_id': 'ilx_0101431', # brain ILX ID
'annotation_type_ilx_id': 'ilx_0381360', # hasDbXref ILX ID
'annotation_value': 'http://neurolex.org/wiki/birnlex_796',
}
"""
url = self.base_url + 'term/add-annotation'
term_data = self.get_entity(term_ilx_id)
if not term_data['id']:
exit(
'term_ilx_id: ' + term_ilx_id + ' does not exist'
)
anno_data = self.get_entity(annotation_type_ilx_id)
if not anno_data['id']:
exit(
'annotation_type_ilx_id: ' + annotation_type_ilx_id +
' does not exist'
)
data = {
'tid': term_data['id'],
'annotation_tid': anno_data['id'],
'value': annotation_value,
'term_version': term_data['version'],
'annotation_term_version': anno_data['version'],
'orig_uid': self.user_id, # BUG: php lacks orig_uid update
}
output = self.post(
url = url,
data = data,
)
### If already exists, we return the actual annotation properly ###
if output.get('errormsg'):
if 'already exists' in output['errormsg'].lower():
term_annotations = self.get_annotation_via_tid(term_data['id'])
for term_annotation in term_annotations:
if str(term_annotation['annotation_tid']) == str(anno_data['id']):
if term_annotation['value'] == data['value']:
print(
'Annotation: [' + term_data['label'] + ' -> ' + anno_data['label'] +
' -> ' + data['value'] + '], already exists.'
)
return term_annotation
exit(output)
exit(output)
return output
def get_relationship_via_tid(self, tid: str) -> dict:
url = self.base_url + 'term/get-relationships/{tid}?key={api_key}'.format(
tid = tid,
api_key = self.api_key,
)
return self.get(url)
def add_relationship(
self,
entity1_ilx: str,
relationship_ilx: str,
entity2_ilx: str) -> dict:
""" Adds relationship connection in Interlex
A relationship exists as 3 different parts:
1. entity with type term, cde, fde, or pde
2. entity with type relationship that connects entity1 to entity2
-> Has its' own meta data, so no value needed
3. entity with type term, cde, fde, or pde
"""
url = self.base_url + 'term/add-relationship'
entity1_data = self.get_entity(entity1_ilx)
if not entity1_data['id']:
exit(
'entity1_ilx: ' + entity1_ilx + ' does not exist'
)
relationship_data = self.get_entity(relationship_ilx)
if not relationship_data['id']:
exit(
'relationship_ilx: ' + relationship_ilx + ' does not exist'
)
entity2_data = self.get_entity(entity2_ilx)
if not entity2_data['id']:
exit(
'entity2_ilx: ' + entity2_ilx + ' does not exist'
)
data = {
'term1_id': entity1_data['id'],
'relationship_tid': relationship_data['id'],
'term2_id': entity2_data['id'],
'term1_version': entity1_data['version'],
'term2_version': entity2_data['version'],
'relationship_term_version': relationship_data['version'],
'orig_uid': self.user_id, # BUG: php lacks orig_uid update
}
output = self.post(
url = url,
data = data,
)
### If already exists, we return the actual relationship properly ###
if output.get('errormsg'):
if 'already exists' in output['errormsg'].lower():
term_relationships = self.get_relationship_via_tid(entity1_data['id'])
for term_relationship in term_relationships:
if str(term_relationship['term2_id']) == str(entity2_data['id']):
if term_relationship['relationship_tid'] == relationship_data['id']:
print(
'relationship: [' + entity1_data['label'] + ' -> ' +
relationship_data['label'] + ' -> ' + entity2_data['label'] +
'], already exists.'
)
return term_relationship
exit(output)
exit(output)
return output
def delete_relationship(
    self,
    entity1_ilx: str,
    relationship_ilx: str,
    entity2_ilx: str) -> dict:
    """ Deletes a relationship connection in InterLex.

    A relationship exists as 3 different parts:
        1. entity with type term, cde, fde, or pde
        2. entity with type relationship that connects entity1 to entity2
            -> Has its' own meta data, so no value needed
        3. entity with type term, cde, fde, or pde

    Returns the server response for the edit, or None if no matching
    relationship exists.
    """
    entity1_data = self.get_entity(entity1_ilx)
    if not entity1_data['id']:
        # FIX: was concatenating the dict entity1_data, raising TypeError
        exit(
            'entity1_ilx: ' + entity1_ilx + ' does not exist'
        )
    relationship_data = self.get_entity(relationship_ilx)
    if not relationship_data['id']:
        exit(
            'relationship_ilx: ' + relationship_ilx + ' does not exist'
        )
    entity2_data = self.get_entity(entity2_ilx)
    if not entity2_data['id']:
        # FIX: was concatenating the dict entity2_data, raising TypeError
        exit(
            'entity2_ilx: ' + entity2_ilx + ' does not exist'
        )
    # Blank ids signal deletion to the edit-relationship endpoint
    data = {
        'term1_id': ' ', #entity1_data['id'],
        'relationship_tid': ' ', #relationship_data['id'],
        'term2_id': ' ',#entity2_data['id'],
        'term1_version': entity1_data['version'],
        'term2_version': entity2_data['version'],
        'relationship_term_version': relationship_data['version'],
        'orig_uid': self.user_id, # BUG: php lacks orig_uid update
    }
    entity_relationships = self.get_relationship_via_tid(entity1_data['id'])
    # Find the exact relationship record matching all three ids
    relationship_id = None
    for relationship in entity_relationships:
        if str(relationship['term1_id']) == str(entity1_data['id']):
            if str(relationship['term2_id']) == str(entity2_data['id']):
                if str(relationship['relationship_tid']) == str(relationship_data['id']):
                    relationship_id = relationship['id']
                    break
    if not relationship_id:
        # FIX: message said "Annotation"; this function deletes relationships
        print('''WARNING: Relationship you wanted to delete does not exist ''')
        return None
    url = self.base_url + 'term/edit-relationship/{id}'.format(id=relationship_id)
    output = self.post(
        url = url,
        data = data,
    )
    return output
|
tgbugs/ontquery | ontquery/plugins/interlex_client.py | InterLexClient.add_relationship | python | def add_relationship(
self,
entity1_ilx: str,
relationship_ilx: str,
entity2_ilx: str) -> dict:
url = self.base_url + 'term/add-relationship'
entity1_data = self.get_entity(entity1_ilx)
if not entity1_data['id']:
exit(
'entity1_ilx: ' + entity1_ilx + ' does not exist'
)
relationship_data = self.get_entity(relationship_ilx)
if not relationship_data['id']:
exit(
'relationship_ilx: ' + relationship_ilx + ' does not exist'
)
entity2_data = self.get_entity(entity2_ilx)
if not entity2_data['id']:
exit(
'entity2_ilx: ' + entity2_ilx + ' does not exist'
)
data = {
'term1_id': entity1_data['id'],
'relationship_tid': relationship_data['id'],
'term2_id': entity2_data['id'],
'term1_version': entity1_data['version'],
'term2_version': entity2_data['version'],
'relationship_term_version': relationship_data['version'],
'orig_uid': self.user_id, # BUG: php lacks orig_uid update
}
output = self.post(
url = url,
data = data,
)
### If already exists, we return the actual relationship properly ###
if output.get('errormsg'):
if 'already exists' in output['errormsg'].lower():
term_relationships = self.get_relationship_via_tid(entity1_data['id'])
for term_relationship in term_relationships:
if str(term_relationship['term2_id']) == str(entity2_data['id']):
if term_relationship['relationship_tid'] == relationship_data['id']:
print(
'relationship: [' + entity1_data['label'] + ' -> ' +
relationship_data['label'] + ' -> ' + entity2_data['label'] +
'], already exists.'
)
return term_relationship
exit(output)
exit(output)
return output | Adds relationship connection in Interlex
A relationship exists as 3 different parts:
1. entity with type term, cde, fde, or pde
2. entity with type relationship that connects entity1 to entity2
-> Has its' own meta data, so no value needed
3. entity with type term, cde, fde, or pde | train | https://github.com/tgbugs/ontquery/blob/bcf4863cb2bf221afe2b093c5dc7da1377300041/ontquery/plugins/interlex_client.py#L651-L713 | [
"def post(self, url: str, data: List[dict]) -> List[dict]:\n \"\"\" Gives data to database \"\"\"\n data.update({\n 'key': self.api_key,\n })\n response = requests.post(\n url,\n data = json.dumps(data),\n headers = {'Content-type': 'application/json'},\n auth = ('scicrunch', 'perl22(query)') # for test2.scicrunch.org\n )\n output = self.process_response(response)\n return output\n",
"def get_entity(self, ilx_id: str) -> dict:\n \"\"\" Gets full meta data (expect their annotations and relationships) from is ILX ID \"\"\"\n ilx_id = self.fix_ilx(ilx_id)\n url = self.base_url + \"ilx/search/identifier/{identifier}?key={api_key}\".format(\n identifier = ilx_id,\n api_key = self.api_key,\n )\n return self.get(url)\n",
"def get_relationship_via_tid(self, tid: str) -> dict:\n url = self.base_url + 'term/get-relationships/{tid}?key={api_key}'.format(\n tid = tid,\n api_key = self.api_key,\n )\n return self.get(url)\n"
] | class InterLexClient:
""" Connects to SciCrunch via its' api endpoints
Purpose is to allow external curators to add entities and annotations to those entities.
Functions To Use:
add_entity
add_annotation
Notes On Time Complexity:
Function add_entity, if added an entity successfully, will hit a least 5 endpoints. This
may cause it to take a few seconds to load each entity into SciCrunch. Function
add_annotation is a little more forgiving with it only hitting 3 minimum.
"""
class Error(Exception): pass
class SuperClassDoesNotExistError(Error):
""" The superclass listed does not exist! """
class EntityDoesNotExistError(Error):
""" The entity listed does not exist! """
class BadResponseError(Error): pass
class NoLabelError(Error): pass
class NoTypeError(Error): pass
class MissingKeyError(Error): pass
class IncorrectKeyError(Error): pass
class IncorrectAPIKeyError(Error): pass
default_base_url = 'https://scicrunch.org/api/1/'
ilx_base_url = 'http://uri.interlex.org/base/'
def __init__(self, api_key: str, base_url: str = default_base_url):
self.api_key = api_key
self.base_url = base_url
user_info_url = self.default_base_url + 'user/info?key=' + self.api_key
self.check_api_key(user_info_url)
self.user_id = str(self.get(user_info_url)['id'])
def check_api_key(self, url):
response = requests.get(
url,
headers = {'Content-type': 'application/json'},
auth = ('scicrunch', 'perl22(query)') # for test2.scicrunch.org
)
if response.status_code not in [200, 201]: # Safety catch.
raise self.IncorrectAPIKeyError('api_key given is incorrect.')
def process_response(self, response: requests.models.Response) -> dict:
""" Checks for correct data response and status codes """
try:
output = response.json()
except json.JSONDecodeError: # Server is having a bad day and crashed.
raise self.BadResponseError(
'Json not returned with status code [' + str(response.status_code) + ']')
if response.status_code == 400:
return output
if response.status_code not in [200, 201]: # Safety catch.
raise self.BadResponseError(
str(output) + ': with status code [' + str(response.status_code) +
'] and params:' + str(output))
return output['data']
def get(self, url: str) -> List[dict]:
""" Requests data from database """
response = requests.get(
url,
headers = {'Content-type': 'application/json'},
auth = ('scicrunch', 'perl22(query)') # for test2.scicrunch.org
)
output = self.process_response(response)
return output
def post(self, url: str, data: List[dict]) -> List[dict]:
""" Gives data to database """
data.update({
'key': self.api_key,
})
response = requests.post(
url,
data = json.dumps(data),
headers = {'Content-type': 'application/json'},
auth = ('scicrunch', 'perl22(query)') # for test2.scicrunch.org
)
output = self.process_response(response)
return output
def fix_ilx(self, ilx_id: str) -> str:
""" Database only excepts lower case and underscore version of ID """
# FIXME probably .rsplit('/', 1) is the more correct version of this
# and because this is nominally a 'private' interface these should be
ilx_id = ilx_id.replace('http://uri.interlex.org/base/', '')
if ilx_id[:4] not in ['TMP:', 'tmp_', 'ILX:', 'ilx_']:
raise ValueError(
'Need to provide ilx ID with format ilx_# or ILX:# for given ID ' + ilx_id)
return ilx_id.replace('ILX:', 'ilx_').replace('TMP:', 'tmp_')
def process_superclass(self, entity: List[dict]) -> List[dict]:
""" Replaces ILX ID with superclass ID """
superclass = entity.pop('superclass')
label = entity['label']
if not superclass.get('ilx_id'):
raise self.SuperClassDoesNotExistError(
f'Superclass not given an interlex ID for label: {label}')
superclass_data = self.get_entity(superclass['ilx_id'])
if not superclass_data['id']:
raise self.SuperClassDoesNotExistError(
'Superclass ILX ID: ' + superclass['ilx_id'] + ' does not exist in SciCrunch')
# BUG: only excepts superclass_tid
entity['superclasses'] = [{'superclass_tid': superclass_data['id']}]
return entity
def process_synonyms(self, entity: List[dict]) -> List[dict]:
""" Making sure key/value is in proper format for synonyms in entity """
label = entity['label']
for synonym in entity['synonyms']:
# these are internal errors and users should never see them
if 'literal' not in synonym:
raise ValueError(f'Synonym not given a literal for label: {label}')
elif len(synonym) > 1:
raise ValueError(f'Too many keys in synonym for label: {label}')
return entity
def process_existing_ids(self, entity: List[dict]) -> List[dict]:
""" Making sure key/value is in proper format for existing_ids in entity """
label = entity['label']
existing_ids = entity['existing_ids']
for existing_id in existing_ids:
if 'curie' not in existing_id or 'iri' not in existing_id:
raise ValueError(
f'Missing needing key(s) in existing_ids for label: {label}')
elif len(existing_id) > 2:
raise ValueError(
f'Extra keys not recognized in existing_ids for label: {label}')
return entity
def crude_search_scicrunch_via_label(self, label:str) -> dict:
""" Server returns anything that is simlar in any catagory """
url = self.base_url + 'term/search/{term}?key={api_key}'.format(
term = label,
api_key = self.api_key,
)
return self.get(url)
def check_scicrunch_for_label(self, label: str) -> dict:
""" Sees if label with your user ID already exists
There are can be multiples of the same label in interlex, but there should only be one
label with your user id. Therefore you can create labels if there already techniqually
exist, but not if you are the one to create it.
"""
list_of_crude_matches = self.crude_search_scicrunch_via_label(label)
for crude_match in list_of_crude_matches:
# If labels match
if crude_match['label'].lower().strip() == label.lower().strip():
complete_data_of_crude_match = self.get_entity(crude_match['ilx'])
crude_match_label = crude_match['label']
crude_match_user_id = complete_data_of_crude_match['uid']
# If label was created by you
if str(self.user_id) == str(crude_match_user_id):
return complete_data_of_crude_match # You created the entity already
# No label AND user id match
return {}
def get_entity(self, ilx_id: str) -> dict:
""" Gets full meta data (expect their annotations and relationships) from is ILX ID """
ilx_id = self.fix_ilx(ilx_id)
url = self.base_url + "ilx/search/identifier/{identifier}?key={api_key}".format(
identifier = ilx_id,
api_key = self.api_key,
)
return self.get(url)
def add_entity(
self,
label: str,
type: str,
definition: str = None,
comment: str = None,
superclass: str = None,
synonyms: list = None) -> dict:
template_entity_input = {k:v for k, v in locals().items() if k != 'self' and v}
if template_entity_input.get('superclass'):
template_entity_input['superclass'] = self.fix_ilx(template_entity_input['superclass'])
if not label:
raise self.NoLabelError('Entity needs a label')
if not type:
raise self.NoTypeError('Entity needs a type')
entity_input = {
'label': label,
'type': type,
}
if definition:
entity_input['definition'] = definition
if comment:
entity_input['comment'] = comment
if superclass:
entity_input['superclass'] = {'ilx_id':self.fix_ilx(superclass)}
if synonyms:
entity_input['synonyms'] = [{'literal': syn} for syn in synonyms]
raw_entity_outout = self.add_raw_entity(entity_input)
# Sanity check -> output same as input, but filled with response data
entity_output = {}
ics = [(e['iri'], e['curie'])
for e in raw_entity_outout['existing_ids']]
entity_output['iri'], entity_output['curie'] = sorted((i, c)
for i, c in ics
if 'ilx_' in i)[0]
### FOR NEW BETA. Old can have 'ilx_' in the ids ###
if 'tmp' in raw_entity_outout['ilx']:
_id = raw_entity_outout['ilx'].split('_')[-1]
entity_output['iri'] = 'http://uri.interlex.org/base/tmp_' + _id
entity_output['curie'] = 'TMP:' + _id
for key, value in template_entity_input.items():
if key == 'superclass':
entity_output[key] = raw_entity_outout['superclasses'][0]['ilx']
elif key == 'synonyms':
entity_output[key] = [syn['literal']
for syn in raw_entity_outout['synonyms']]
else:
entity_output[key] = str(raw_entity_outout[key])
# skip this for now, I check it downstream be cause I'm paranoid, but in this client it is
# safe to assume that the value given will be the value returned if there is a return at all
# it also isn't that they match exactly, because some new values (e.g. iri and curie) are expected
#if entity_output != template_entity_input:
# DEBUG: helps see what's wrong; might want to make a clean version of this
# for key, value in template_entity_input.items():
# if template_entity_input[key] != entity_output[key]:
# print(template_entity_input[key], entity_output[key])
#raise self.BadResponseError('The server did not return proper data!')
if entity_output.get('superclass'):
entity_output['superclass'] = self.ilx_base_url + entity_output['superclass']
entity_output['ilx'] = self.ilx_base_url + raw_entity_outout['ilx']
return entity_output
def add_raw_entity(self, entity: dict) -> dict:
""" Adds entity if it does not already exist under your user ID.
Need to provide a list of dictionaries that have at least the key/values
for label and type. If given a key, the values provided must be in the
format shown in order for the server to except them. You can input
multiple synonyms, or existing_ids.
Entity type can be any of the following: term, pde, fde, cde, annotation, or relationship
Options Template:
entity = {
'label': '',
'type': '',
'definition': '',
'comment': '',
'superclass': {
'ilx_id': ''
},
'synonyms': [
{
'literal': ''
},
],
'existing_ids': [
{
'iri': '',
'curie': '',
},
],
}
Minimum Needed:
entity = {
'label': '',
'type': '',
}
Example:
entity = {
'label': 'brain',
'type': 'pde',
'definition': 'Part of the central nervous system',
'comment': 'Cannot live without it',
'superclass': {
'ilx_id': 'ilx_0108124', # ILX ID for Organ
},
'synonyms': [
{
'literal': 'Encephalon'
},
{
'literal': 'Cerebro'
},
],
'existing_ids': [
{
'iri': 'http://uri.neuinfo.org/nif/nifstd/birnlex_796',
'curie': 'BIRNLEX:796',
},
],
}
"""
needed_in_entity = set([
'label',
'type',
])
options_in_entity = set([
'label',
'type',
'definition',
'comment',
'superclass',
'synonyms',
'existing_ids'
])
prime_entity_url = self.base_url + 'ilx/add'
add_entity_url = self.base_url + 'term/add'
### Checking if key/value format is correct ###
# Seeing if you are missing a needed key
if (set(entity) & needed_in_entity) != needed_in_entity:
raise self.MissingKeyError(
'You need key(s): '+ str(needed_in_entity - set(entity)))
# Seeing if you have other options not included in the description
elif (set(entity) | options_in_entity) != options_in_entity:
raise self.IncorrectKeyError(
'Unexpected key(s): ' + str(set(entity) - options_in_entity))
entity['type'] = entity['type'].lower() # BUG: server only takes lowercase
if entity['type'] not in ['term', 'relationship', 'annotation', 'cde', 'fde', 'pde']:
raise TypeError(
'Entity should be one of the following: ' +
'term, relationship, annotation, cde, fde, pde')
if entity.get('superclass'):
entity = self.process_superclass(entity)
if entity.get('synonyms'):
entity = self.process_synonyms(entity)
if entity.get('existing_ids'):
entity = self.process_existing_ids(entity)
entity['uid'] = self.user_id # BUG: php lacks uid update
### Adding entity to SciCrunch ###
entity['term'] = entity.pop('label') # ilx/add nuance
ilx_data = self.post(
url = prime_entity_url,
data = entity.copy(),
) # requesting spot in server for entity
if ilx_data.get('ilx'):
ilx_id = ilx_data['ilx']
else:
ilx_id = ilx_data['fragment'] # beta.scicrunch.org
entity['label'] = entity.pop('term') # term/add nuance
entity['ilx'] = ilx_id # need entity ilx_id to place entity in db
output = self.post(
url = add_entity_url,
data = entity.copy(),
) # data represented in SciCrunch interface
### Checking if label already exisits ###
if output.get('errormsg'):
if 'already exists' in output['errormsg'].lower():
prexisting_data = self.check_scicrunch_for_label(entity['label'])
if prexisting_data:
print(
'You already added entity', entity['label'],
'with ILX ID:', prexisting_data['ilx'])
return prexisting_data
self.Error(output) # FIXME what is the correct error here?
self.Error(output) # FIXME what is the correct error here?
# BUG: server output incomplete compared to search via ilx ids
output = self.get_entity(output['ilx'])
return output
def update_entity(
self,
ilx_id: str,
label: str = None,
type: str = None,
definition: str = None,
comment: str = None,
superclass: str = None,
synonyms: list = None) -> dict:
""" Updates pre-existing entity as long as the api_key is from the account that created it
Args:
label: name of entity
type: entities type
Can be any of the following: term, cde, fde, pde, annotation, relationship
definition: entities definition
comment: a foot note regarding either the interpretation of the data or the data itself
superclass: entity is a sub-part of this entity
Example: Organ is a superclass to Brain
synonyms: entity synonyms
Returns:
Server response that is a nested dictionary format
"""
template_entity_input = {k:v for k, v in locals().copy().items() if k != 'self'}
if template_entity_input.get('superclass'):
template_entity_input['superclass'] = self.fix_ilx(template_entity_input['superclass'])
existing_entity = self.get_entity(ilx_id=ilx_id)
if not existing_entity['id']: # TODO: Need to make a proper ilx_id check error
raise self.EntityDoesNotExistError(
f'ilx_id provided {ilx_id} does not exist')
update_url = self.base_url + 'term/edit/{id}'.format(id=existing_entity['id'])
if label:
existing_entity['label'] = label
if type:
existing_entity['type'] = type
if definition:
existing_entity['definition'] = definition
if comment:
existing_entity['comment'] = comment
if superclass:
existing_entity['superclass'] = {'ilx_id': superclass}
existing_entity = self.process_superclass(existing_entity)
# If a match use old data, else append new synonym
if synonyms:
if existing_entity['synonyms']:
new_existing_synonyms = []
existing_synonyms = {syn['literal'].lower():syn for syn in existing_entity['synonyms']}
for synonym in synonyms:
existing_synonym = existing_synonyms.get(synonym.lower())
if not existing_synonym:
new_existing_synonyms.append({'literal': synonym})
else:
new_existing_synonyms.append(existing_synonym)
existing_entity['synonyms'] = new_existing_synonyms
# Just in case I need this...
# if synonyms_to_delete:
# if existing_entity['synonyms']:
# remaining_existing_synonyms = []
# existing_synonyms = {syn['literal'].lower():syn for syn in existing_entity['synonyms']}
# for synonym in synonyms:
# if existing_synonyms.get(synonym.lower()):
# existing_synonyms.pop(synonym.lower())
# else:
# print('WARNING: synonym you wanted to delete', synonym, 'does not exist')
# existing_entity['synonyms'] = list(existing_synonyms.values())
response = self.post(
url = update_url,
data = existing_entity,
)
# BUG: server response is bad and needs to actually search again to get proper format
raw_entity_outout = self.get_entity(response['ilx'])
entity_output = {}
ics = [(e['iri'], e['curie'])
for e in raw_entity_outout['existing_ids']]
entity_output['iri'], entity_output['curie'] = sorted((i, c)
for i, c in ics
if 'ilx_' in i)[0]
### FOR NEW BETA. Old can have 'ilx_' in the ids ###
if 'tmp' in raw_entity_outout['ilx']:
_id = raw_entity_outout['ilx'].split('_')[-1]
entity_output['iri'] = 'http://uri.interlex.org/base/tmp_' + _id
entity_output['curie'] = 'TMP:' + _id
print(template_entity_input)
for key, value in template_entity_input.items():
if key == 'superclass':
if raw_entity_outout.get('superclasses'):
entity_output[key] = raw_entity_outout['superclasses'][0]['ilx']
elif key == 'synonyms':
entity_output[key] = [syn['literal']
for syn in raw_entity_outout['synonyms']]
elif key == 'ilx_id':
pass
else:
entity_output[key] = str(raw_entity_outout[key])
if entity_output.get('superclass'):
entity_output['superclass'] = self.ilx_base_url + entity_output['superclass']
entity_output['ilx'] = self.ilx_base_url + raw_entity_outout['ilx']
return entity_output
def get_annotation_via_tid(self, tid: str) -> dict:
""" Gets annotation via anchored entity id """
url = self.base_url + 'term/get-annotations/{tid}?key={api_key}'.format(
tid = tid,
api_key = self.api_key,
)
return self.get(url)
def add_annotation(
self,
term_ilx_id: str,
annotation_type_ilx_id: str,
annotation_value: str) -> dict:
""" Adding an annotation value to a prexisting entity
An annotation exists as 3 different parts:
1. entity with type term, cde, fde, or pde
2. entity with type annotation
3. string value of the annotation
Example:
annotation = {
'term_ilx_id': 'ilx_0101431', # brain ILX ID
'annotation_type_ilx_id': 'ilx_0381360', # hasDbXref ILX ID
'annotation_value': 'http://neurolex.org/wiki/birnlex_796',
}
"""
url = self.base_url + 'term/add-annotation'
term_data = self.get_entity(term_ilx_id)
if not term_data['id']:
exit(
'term_ilx_id: ' + term_ilx_id + ' does not exist'
)
anno_data = self.get_entity(annotation_type_ilx_id)
if not anno_data['id']:
exit(
'annotation_type_ilx_id: ' + annotation_type_ilx_id +
' does not exist'
)
data = {
'tid': term_data['id'],
'annotation_tid': anno_data['id'],
'value': annotation_value,
'term_version': term_data['version'],
'annotation_term_version': anno_data['version'],
'orig_uid': self.user_id, # BUG: php lacks orig_uid update
}
output = self.post(
url = url,
data = data,
)
### If already exists, we return the actual annotation properly ###
if output.get('errormsg'):
if 'already exists' in output['errormsg'].lower():
term_annotations = self.get_annotation_via_tid(term_data['id'])
for term_annotation in term_annotations:
if str(term_annotation['annotation_tid']) == str(anno_data['id']):
if term_annotation['value'] == data['value']:
print(
'Annotation: [' + term_data['label'] + ' -> ' + anno_data['label'] +
' -> ' + data['value'] + '], already exists.'
)
return term_annotation
exit(output)
exit(output)
return output
def delete_annotation(
self,
term_ilx_id: str,
annotation_type_ilx_id: str,
annotation_value: str) -> dict:
""" If annotation doesnt exist, add it
"""
term_data = self.get_entity(term_ilx_id)
if not term_data['id']:
exit(
'term_ilx_id: ' + term_ilx_id + ' does not exist'
)
anno_data = self.get_entity(annotation_type_ilx_id)
if not anno_data['id']:
exit(
'annotation_type_ilx_id: ' + annotation_type_ilx_id +
' does not exist'
)
entity_annotations = self.get_annotation_via_tid(term_data['id'])
annotation_id = ''
for annotation in entity_annotations:
if str(annotation['tid']) == str(term_data['id']):
if str(annotation['annotation_tid']) == str(anno_data['id']):
if str(annotation['value']) == str(annotation_value):
annotation_id = annotation['id']
break
if not annotation_id:
print('''WARNING: Annotation you wanted to delete does not exist ''')
return None
url = self.base_url + 'term/edit-annotation/{annotation_id}'.format(
annotation_id = annotation_id
)
data = {
'tid': ' ', # for delete
'annotation_tid': ' ', # for delete
'value': ' ', # for delete
'term_version': ' ',
'annotation_term_version': ' ',
}
output = self.post(
url = url,
data = data,
)
# check output
return output
def get_relationship_via_tid(self, tid: str) -> dict:
url = self.base_url + 'term/get-relationships/{tid}?key={api_key}'.format(
tid = tid,
api_key = self.api_key,
)
return self.get(url)
def delete_relationship(
self,
entity1_ilx: str,
relationship_ilx: str,
entity2_ilx: str) -> dict:
""" Adds relationship connection in Interlex
A relationship exists as 3 different parts:
1. entity with type term, cde, fde, or pde
2. entity with type relationship that connects entity1 to entity2
-> Has its' own meta data, so no value needed
3. entity with type term, cde, fde, or pde
"""
entity1_data = self.get_entity(entity1_ilx)
if not entity1_data['id']:
exit(
'entity1_ilx: ' + entity1_data + ' does not exist'
)
relationship_data = self.get_entity(relationship_ilx)
if not relationship_data['id']:
exit(
'relationship_ilx: ' + relationship_ilx + ' does not exist'
)
entity2_data = self.get_entity(entity2_ilx)
if not entity2_data['id']:
exit(
'entity2_ilx: ' + entity2_data + ' does not exist'
)
data = {
'term1_id': ' ', #entity1_data['id'],
'relationship_tid': ' ', #relationship_data['id'],
'term2_id': ' ',#entity2_data['id'],
'term1_version': entity1_data['version'],
'term2_version': entity2_data['version'],
'relationship_term_version': relationship_data['version'],
'orig_uid': self.user_id, # BUG: php lacks orig_uid update
}
entity_relationships = self.get_relationship_via_tid(entity1_data['id'])
# TODO: parse through entity_relationships to see if we have a match; else print warning and return None
relationship_id = None
for relationship in entity_relationships:
if str(relationship['term1_id']) == str(entity1_data['id']):
if str(relationship['term2_id']) == str(entity2_data['id']):
if str(relationship['relationship_tid']) == str(relationship_data['id']):
relationship_id = relationship['id']
break
if not relationship_id:
print('''WARNING: Annotation you wanted to delete does not exist ''')
return None
url = self.base_url + 'term/edit-relationship/{id}'.format(id=relationship_id)
output = self.post(
url = url,
data = data,
)
return output
|
tgbugs/ontquery | ontquery/plugins/interlex_client.py | InterLexClient.delete_relationship | python | def delete_relationship(
self,
entity1_ilx: str,
relationship_ilx: str,
entity2_ilx: str) -> dict:
entity1_data = self.get_entity(entity1_ilx)
if not entity1_data['id']:
exit(
'entity1_ilx: ' + entity1_data + ' does not exist'
)
relationship_data = self.get_entity(relationship_ilx)
if not relationship_data['id']:
exit(
'relationship_ilx: ' + relationship_ilx + ' does not exist'
)
entity2_data = self.get_entity(entity2_ilx)
if not entity2_data['id']:
exit(
'entity2_ilx: ' + entity2_data + ' does not exist'
)
data = {
'term1_id': ' ', #entity1_data['id'],
'relationship_tid': ' ', #relationship_data['id'],
'term2_id': ' ',#entity2_data['id'],
'term1_version': entity1_data['version'],
'term2_version': entity2_data['version'],
'relationship_term_version': relationship_data['version'],
'orig_uid': self.user_id, # BUG: php lacks orig_uid update
}
entity_relationships = self.get_relationship_via_tid(entity1_data['id'])
# TODO: parse through entity_relationships to see if we have a match; else print warning and return None
relationship_id = None
for relationship in entity_relationships:
if str(relationship['term1_id']) == str(entity1_data['id']):
if str(relationship['term2_id']) == str(entity2_data['id']):
if str(relationship['relationship_tid']) == str(relationship_data['id']):
relationship_id = relationship['id']
break
if not relationship_id:
print('''WARNING: Annotation you wanted to delete does not exist ''')
return None
url = self.base_url + 'term/edit-relationship/{id}'.format(id=relationship_id)
output = self.post(
url = url,
data = data,
)
return output | Adds relationship connection in Interlex
A relationship exists as 3 different parts:
1. entity with type term, cde, fde, or pde
2. entity with type relationship that connects entity1 to entity2
-> Has its' own meta data, so no value needed
3. entity with type term, cde, fde, or pde | train | https://github.com/tgbugs/ontquery/blob/bcf4863cb2bf221afe2b093c5dc7da1377300041/ontquery/plugins/interlex_client.py#L715-L776 | [
"def get_entity(self, ilx_id: str) -> dict:\n \"\"\" Gets full meta data (expect their annotations and relationships) from is ILX ID \"\"\"\n ilx_id = self.fix_ilx(ilx_id)\n url = self.base_url + \"ilx/search/identifier/{identifier}?key={api_key}\".format(\n identifier = ilx_id,\n api_key = self.api_key,\n )\n return self.get(url)\n",
"def get_relationship_via_tid(self, tid: str) -> dict:\n url = self.base_url + 'term/get-relationships/{tid}?key={api_key}'.format(\n tid = tid,\n api_key = self.api_key,\n )\n return self.get(url)\n"
] | class InterLexClient:
""" Connects to SciCrunch via its' api endpoints
Purpose is to allow external curators to add entities and annotations to those entities.
Functions To Use:
add_entity
add_annotation
Notes On Time Complexity:
Function add_entity, if added an entity successfully, will hit a least 5 endpoints. This
may cause it to take a few seconds to load each entity into SciCrunch. Function
add_annotation is a little more forgiving with it only hitting 3 minimum.
"""
class Error(Exception): pass
class SuperClassDoesNotExistError(Error):
""" The superclass listed does not exist! """
class EntityDoesNotExistError(Error):
""" The entity listed does not exist! """
class BadResponseError(Error): pass
class NoLabelError(Error): pass
class NoTypeError(Error): pass
class MissingKeyError(Error): pass
class IncorrectKeyError(Error): pass
class IncorrectAPIKeyError(Error): pass
default_base_url = 'https://scicrunch.org/api/1/'
ilx_base_url = 'http://uri.interlex.org/base/'
def __init__(self, api_key: str, base_url: str = default_base_url):
self.api_key = api_key
self.base_url = base_url
user_info_url = self.default_base_url + 'user/info?key=' + self.api_key
self.check_api_key(user_info_url)
self.user_id = str(self.get(user_info_url)['id'])
def check_api_key(self, url):
response = requests.get(
url,
headers = {'Content-type': 'application/json'},
auth = ('scicrunch', 'perl22(query)') # for test2.scicrunch.org
)
if response.status_code not in [200, 201]: # Safety catch.
raise self.IncorrectAPIKeyError('api_key given is incorrect.')
def process_response(self, response: requests.models.Response) -> dict:
""" Checks for correct data response and status codes """
try:
output = response.json()
except json.JSONDecodeError: # Server is having a bad day and crashed.
raise self.BadResponseError(
'Json not returned with status code [' + str(response.status_code) + ']')
if response.status_code == 400:
return output
if response.status_code not in [200, 201]: # Safety catch.
raise self.BadResponseError(
str(output) + ': with status code [' + str(response.status_code) +
'] and params:' + str(output))
return output['data']
def get(self, url: str) -> List[dict]:
""" Requests data from database """
response = requests.get(
url,
headers = {'Content-type': 'application/json'},
auth = ('scicrunch', 'perl22(query)') # for test2.scicrunch.org
)
output = self.process_response(response)
return output
def post(self, url: str, data: List[dict]) -> List[dict]:
""" Gives data to database """
data.update({
'key': self.api_key,
})
response = requests.post(
url,
data = json.dumps(data),
headers = {'Content-type': 'application/json'},
auth = ('scicrunch', 'perl22(query)') # for test2.scicrunch.org
)
output = self.process_response(response)
return output
def fix_ilx(self, ilx_id: str) -> str:
""" Database only excepts lower case and underscore version of ID """
# FIXME probably .rsplit('/', 1) is the more correct version of this
# and because this is nominally a 'private' interface these should be
ilx_id = ilx_id.replace('http://uri.interlex.org/base/', '')
if ilx_id[:4] not in ['TMP:', 'tmp_', 'ILX:', 'ilx_']:
raise ValueError(
'Need to provide ilx ID with format ilx_# or ILX:# for given ID ' + ilx_id)
return ilx_id.replace('ILX:', 'ilx_').replace('TMP:', 'tmp_')
def process_superclass(self, entity: dict) -> dict:
    """ Replaces ILX ID with superclass ID.

    The incoming entity carries ``{'superclass': {'ilx_id': ...}}``; the
    server instead wants ``{'superclasses': [{'superclass_tid': <db id>}]}``,
    so the superclass is resolved via get_entity and rewritten in place.
    """
    superclass = entity.pop('superclass')
    label = entity['label']
    if not superclass.get('ilx_id'):
        raise self.SuperClassDoesNotExistError(
            f'Superclass not given an interlex ID for label: {label}')
    superclass_data = self.get_entity(superclass['ilx_id'])
    if not superclass_data['id']:
        raise self.SuperClassDoesNotExistError(
            'Superclass ILX ID: ' + superclass['ilx_id'] + ' does not exist in SciCrunch')
    # BUG: server only accepts superclass_tid
    entity['superclasses'] = [{'superclass_tid': superclass_data['id']}]
    return entity
def process_synonyms(self, entity: dict) -> dict:
    """ Making sure key/value is in proper format for synonyms in entity.

    Each synonym must be exactly ``{'literal': <str>}``. These are internal
    programming errors, so a plain ValueError (not a user-facing error
    class) is raised. Returns the unmodified entity for chaining.
    """
    # NOTE: annotation fixed from List[dict] to dict — a single entity
    # dict is passed in and returned, never a list.
    label = entity['label']
    for synonym in entity['synonyms']:
        # these are internal errors and users should never see them
        if 'literal' not in synonym:
            raise ValueError(f'Synonym not given a literal for label: {label}')
        elif len(synonym) > 1:
            raise ValueError(f'Too many keys in synonym for label: {label}')
    return entity
def process_existing_ids(self, entity: dict) -> dict:
    """ Making sure key/value is in proper format for existing_ids in entity.

    Each existing id must be exactly ``{'iri': <str>, 'curie': <str>}``.
    Internal programming errors raise a plain ValueError. Returns the
    unmodified entity for chaining.
    """
    # NOTE: annotation fixed from List[dict] to dict — a single entity
    # dict is passed in and returned, never a list.
    label = entity['label']
    existing_ids = entity['existing_ids']
    for existing_id in existing_ids:
        if 'curie' not in existing_id or 'iri' not in existing_id:
            raise ValueError(
                f'Missing needing key(s) in existing_ids for label: {label}')
        elif len(existing_id) > 2:
            raise ValueError(
                f'Extra keys not recognized in existing_ids for label: {label}')
    return entity
def crude_search_scicrunch_via_label(self, label:str) -> dict:
    """ Server returns anything that is similar in any category (crude match) """
    url = self.base_url + 'term/search/{term}?key={api_key}'.format(
        term = label,
        api_key = self.api_key,
    )
    return self.get(url)
def check_scicrunch_for_label(self, label: str) -> dict:
    """ Sees if an entity with this label already exists under your user ID.

    There can be multiple entities with the same label in InterLex, but
    only one of them can belong to your user id. You may therefore create
    a label that already exists, just not if you were the one who created
    it. Returns the full entity record on a match, else an empty dict.
    """
    list_of_crude_matches = self.crude_search_scicrunch_via_label(label)
    for crude_match in list_of_crude_matches:
        # If labels match (case/whitespace insensitive)
        if crude_match['label'].lower().strip() == label.lower().strip():
            complete_data_of_crude_match = self.get_entity(crude_match['ilx'])
            crude_match_label = crude_match['label']
            crude_match_user_id = complete_data_of_crude_match['uid']
            # If label was created by you
            if str(self.user_id) == str(crude_match_user_id):
                return complete_data_of_crude_match # You created the entity already
    # No label AND user id match
    return {}
def get_entity(self, ilx_id: str) -> dict:
    """ Gets full meta data (excluding annotations and relationships) for an ILX ID """
    ilx_id = self.fix_ilx(ilx_id)
    url = self.base_url + "ilx/search/identifier/{identifier}?key={api_key}".format(
        identifier = ilx_id,
        api_key = self.api_key,
    )
    return self.get(url)
def add_entity(
        self,
        label: str,
        type: str,
        definition: str = None,
        comment: str = None,
        superclass: str = None,
        synonyms: list = None) -> dict:
    """ Adds an entity to InterLex and returns a flattened summary dict.

    Convenience wrapper around add_raw_entity: builds the nested dict the
    server expects, then flattens the server response back into a simple
    dict with the input keys plus 'iri', 'curie' and 'ilx'.
    """
    # Must capture locals() before any other assignment so it only
    # contains the call arguments (falsy values are dropped).
    template_entity_input = {k:v for k, v in locals().items() if k != 'self' and v}
    if template_entity_input.get('superclass'):
        template_entity_input['superclass'] = self.fix_ilx(template_entity_input['superclass'])
    if not label:
        raise self.NoLabelError('Entity needs a label')
    if not type:
        raise self.NoTypeError('Entity needs a type')
    entity_input = {
        'label': label,
        'type': type,
    }
    if definition:
        entity_input['definition'] = definition
    if comment:
        entity_input['comment'] = comment
    if superclass:
        entity_input['superclass'] = {'ilx_id':self.fix_ilx(superclass)}
    if synonyms:
        entity_input['synonyms'] = [{'literal': syn} for syn in synonyms]
    raw_entity_outout = self.add_raw_entity(entity_input)
    # Sanity check -> output same as input, but filled with response data
    entity_output = {}
    ics = [(e['iri'], e['curie'])
           for e in raw_entity_outout['existing_ids']]
    # Pick the canonical ilx_ iri/curie pair out of existing_ids.
    entity_output['iri'], entity_output['curie'] = sorted((i, c)
                                                          for i, c in ics
                                                          if 'ilx_' in i)[0]
    ### FOR NEW BETA. Old can have 'ilx_' in the ids ###
    if 'tmp' in raw_entity_outout['ilx']:
        _id = raw_entity_outout['ilx'].split('_')[-1]
        entity_output['iri'] = 'http://uri.interlex.org/base/tmp_' + _id
        entity_output['curie'] = 'TMP:' + _id
    for key, value in template_entity_input.items():
        if key == 'superclass':
            entity_output[key] = raw_entity_outout['superclasses'][0]['ilx']
        elif key == 'synonyms':
            entity_output[key] = [syn['literal']
                                  for syn in raw_entity_outout['synonyms']]
        else:
            entity_output[key] = str(raw_entity_outout[key])
    # skip this for now, I check it downstream be cause I'm paranoid, but in this client it is
    # safe to assume that the value given will be the value returned if there is a return at all
    # it also isn't that they match exactly, because some new values (e.g. iri and curie) are expected
    #if entity_output != template_entity_input:
        # DEBUG: helps see what's wrong; might want to make a clean version of this
        # for key, value in template_entity_input.items():
        #     if template_entity_input[key] != entity_output[key]:
        #         print(template_entity_input[key], entity_output[key])
        #raise self.BadResponseError('The server did not return proper data!')
    if entity_output.get('superclass'):
        entity_output['superclass'] = self.ilx_base_url + entity_output['superclass']
    entity_output['ilx'] = self.ilx_base_url + raw_entity_outout['ilx']
    return entity_output
def add_raw_entity(self, entity: dict) -> dict:
    """ Adds entity if it does not already exist under your user ID.

    Need to provide a dictionary that has at least the key/values
    for label and type. If given a key, the values provided must be in the
    format shown in order for the server to accept them. You can input
    multiple synonyms, or existing_ids.

    Entity type can be any of the following: term, pde, fde, cde, annotation, or relationship

    Options Template:
        entity = {
            'label': '',
            'type': '',
            'definition': '',
            'comment': '',
            'superclass': {
                'ilx_id': ''
            },
            'synonyms': [
                {
                    'literal': ''
                },
            ],
            'existing_ids': [
                {
                    'iri': '',
                    'curie': '',
                },
            ],
        }

    Minimum Needed:
        entity = {
            'label': '',
            'type': '',
        }

    Example:
        entity = {
            'label': 'brain',
            'type': 'pde',
            'definition': 'Part of the central nervous system',
            'comment': 'Cannot live without it',
            'superclass': {
                'ilx_id': 'ilx_0108124', # ILX ID for Organ
            },
            'synonyms': [
                {
                    'literal': 'Encephalon'
                },
                {
                    'literal': 'Cerebro'
                },
            ],
            'existing_ids': [
                {
                    'iri': 'http://uri.neuinfo.org/nif/nifstd/birnlex_796',
                    'curie': 'BIRNLEX:796',
                },
            ],
        }
    """
    needed_in_entity = set([
        'label',
        'type',
    ])
    options_in_entity = set([
        'label',
        'type',
        'definition',
        'comment',
        'superclass',
        'synonyms',
        'existing_ids'
    ])
    prime_entity_url = self.base_url + 'ilx/add'
    add_entity_url = self.base_url + 'term/add'
    ### Checking if key/value format is correct ###
    # Seeing if you are missing a needed key
    if (set(entity) & needed_in_entity) != needed_in_entity:
        raise self.MissingKeyError(
            'You need key(s): '+ str(needed_in_entity - set(entity)))
    # Seeing if you have other options not included in the description
    elif (set(entity) | options_in_entity) != options_in_entity:
        raise self.IncorrectKeyError(
            'Unexpected key(s): ' + str(set(entity) - options_in_entity))
    entity['type'] = entity['type'].lower() # BUG: server only takes lowercase
    if entity['type'] not in ['term', 'relationship', 'annotation', 'cde', 'fde', 'pde']:
        raise TypeError(
            'Entity should be one of the following: ' +
            'term, relationship, annotation, cde, fde, pde')
    # Normalize nested structures into the exact shape the server wants.
    if entity.get('superclass'):
        entity = self.process_superclass(entity)
    if entity.get('synonyms'):
        entity = self.process_synonyms(entity)
    if entity.get('existing_ids'):
        entity = self.process_existing_ids(entity)
    entity['uid'] = self.user_id # BUG: php lacks uid update
    ### Adding entity to SciCrunch ###
    entity['term'] = entity.pop('label') # ilx/add nuance
    ilx_data = self.post(
        url = prime_entity_url,
        data = entity.copy(),
    ) # requesting spot in server for entity
    if ilx_data.get('ilx'):
        ilx_id = ilx_data['ilx']
    else:
        ilx_id = ilx_data['fragment'] # beta.scicrunch.org
    entity['label'] = entity.pop('term') # term/add nuance
    entity['ilx'] = ilx_id # need entity ilx_id to place entity in db
    output = self.post(
        url = add_entity_url,
        data = entity.copy(),
    ) # data represented in SciCrunch interface
    ### Checking if label already exists ###
    if output.get('errormsg'):
        if 'already exists' in output['errormsg'].lower():
            prexisting_data = self.check_scicrunch_for_label(entity['label'])
            if prexisting_data:
                print(
                    'You already added entity', entity['label'],
                    'with ILX ID:', prexisting_data['ilx'])
                return prexisting_data
            self.Error(output) # FIXME what is the correct error here?
        self.Error(output) # FIXME what is the correct error here?
    # BUG: server output incomplete compared to search via ilx ids
    output = self.get_entity(output['ilx'])
    return output
def update_entity(
        self,
        ilx_id: str,
        label: str = None,
        type: str = None,
        definition: str = None,
        comment: str = None,
        superclass: str = None,
        synonyms: list = None) -> dict:
    """ Updates pre-existing entity as long as the api_key is from the account that created it

    Args:
        ilx_id: ILX ID of the entity to update
        label: name of entity
        type: entities type
            Can be any of the following: term, cde, fde, pde, annotation, relationship
        definition: entities definition
        comment: a foot note regarding either the interpretation of the data or the data itself
        superclass: entity is a sub-part of this entity
            Example: Organ is a superclass to Brain
        synonyms: entity synonyms

    Returns:
        Flattened summary dict of the updated entity (same shape as add_entity's return)
    """
    # Must capture locals() before any other assignment so it only
    # contains the call arguments (None values are kept here on purpose).
    template_entity_input = {k:v for k, v in locals().copy().items() if k != 'self'}
    if template_entity_input.get('superclass'):
        template_entity_input['superclass'] = self.fix_ilx(template_entity_input['superclass'])
    existing_entity = self.get_entity(ilx_id=ilx_id)
    if not existing_entity['id']: # TODO: Need to make a proper ilx_id check error
        raise self.EntityDoesNotExistError(
            f'ilx_id provided {ilx_id} does not exist')
    update_url = self.base_url + 'term/edit/{id}'.format(id=existing_entity['id'])
    # Overlay only the fields the caller actually supplied.
    if label:
        existing_entity['label'] = label
    if type:
        existing_entity['type'] = type
    if definition:
        existing_entity['definition'] = definition
    if comment:
        existing_entity['comment'] = comment
    if superclass:
        existing_entity['superclass'] = {'ilx_id': superclass}
        existing_entity = self.process_superclass(existing_entity)
    # If a synonym matches an existing one use the old record, else append new
    if synonyms:
        if existing_entity['synonyms']:
            new_existing_synonyms = []
            existing_synonyms = {syn['literal'].lower():syn for syn in existing_entity['synonyms']}
            for synonym in synonyms:
                existing_synonym = existing_synonyms.get(synonym.lower())
                if not existing_synonym:
                    new_existing_synonyms.append({'literal': synonym})
                else:
                    new_existing_synonyms.append(existing_synonym)
            existing_entity['synonyms'] = new_existing_synonyms
    response = self.post(
        url = update_url,
        data = existing_entity,
    )
    # BUG: server response is bad and needs to actually search again to get proper format
    raw_entity_outout = self.get_entity(response['ilx'])
    # Flatten the raw server record back into the simple input shape.
    entity_output = {}
    ics = [(e['iri'], e['curie'])
           for e in raw_entity_outout['existing_ids']]
    entity_output['iri'], entity_output['curie'] = sorted((i, c)
                                                          for i, c in ics
                                                          if 'ilx_' in i)[0]
    ### FOR NEW BETA. Old can have 'ilx_' in the ids ###
    if 'tmp' in raw_entity_outout['ilx']:
        _id = raw_entity_outout['ilx'].split('_')[-1]
        entity_output['iri'] = 'http://uri.interlex.org/base/tmp_' + _id
        entity_output['curie'] = 'TMP:' + _id
    # (removed leftover debug print of template_entity_input)
    for key, value in template_entity_input.items():
        if key == 'superclass':
            if raw_entity_outout.get('superclasses'):
                entity_output[key] = raw_entity_outout['superclasses'][0]['ilx']
        elif key == 'synonyms':
            entity_output[key] = [syn['literal']
                                  for syn in raw_entity_outout['synonyms']]
        elif key == 'ilx_id':
            pass
        else:
            entity_output[key] = str(raw_entity_outout[key])
    if entity_output.get('superclass'):
        entity_output['superclass'] = self.ilx_base_url + entity_output['superclass']
    entity_output['ilx'] = self.ilx_base_url + raw_entity_outout['ilx']
    return entity_output
def get_annotation_via_tid(self, tid: str) -> dict:
    """ Fetch all annotations anchored to the entity with database id *tid* """
    url = self.base_url + f'term/get-annotations/{tid}?key={self.api_key}'
    return self.get(url)
def add_annotation(
        self,
        term_ilx_id: str,
        annotation_type_ilx_id: str,
        annotation_value: str) -> dict:
    """ Adding an annotation value to a prexisting entity

    An annotation exists as 3 different parts:
        1. entity with type term, cde, fde, or pde
        2. entity with type annotation
        3. string value of the annotation

    If the exact annotation already exists, the existing record is
    returned instead of a duplicate being created.

    Example:
        annotation = {
            'term_ilx_id': 'ilx_0101431', # brain ILX ID
            'annotation_type_ilx_id': 'ilx_0381360', # hasDbXref ILX ID
            'annotation_value': 'http://neurolex.org/wiki/birnlex_796',
        }
    """
    url = self.base_url + 'term/add-annotation'
    # Resolve both ends of the annotation; bail out if either is missing.
    term_data = self.get_entity(term_ilx_id)
    if not term_data['id']:
        exit(
            'term_ilx_id: ' + term_ilx_id + ' does not exist'
        )
    anno_data = self.get_entity(annotation_type_ilx_id)
    if not anno_data['id']:
        exit(
            'annotation_type_ilx_id: ' + annotation_type_ilx_id +
            ' does not exist'
        )
    data = {
        'tid': term_data['id'],
        'annotation_tid': anno_data['id'],
        'value': annotation_value,
        'term_version': term_data['version'],
        'annotation_term_version': anno_data['version'],
        'orig_uid': self.user_id, # BUG: php lacks orig_uid update
    }
    output = self.post(
        url = url,
        data = data,
    )
    ### If already exists, we return the actual annotation properly ###
    if output.get('errormsg'):
        if 'already exists' in output['errormsg'].lower():
            term_annotations = self.get_annotation_via_tid(term_data['id'])
            for term_annotation in term_annotations:
                if str(term_annotation['annotation_tid']) == str(anno_data['id']):
                    if term_annotation['value'] == data['value']:
                        print(
                            'Annotation: [' + term_data['label'] + ' -> ' + anno_data['label'] +
                            ' -> ' + data['value'] + '], already exists.'
                        )
                        return term_annotation
            exit(output)
        exit(output)
    return output
def delete_annotation(
        self,
        term_ilx_id: str,
        annotation_type_ilx_id: str,
        annotation_value: str) -> dict:
    """ Deletes the annotation matching (term, annotation type, value).

    Prints a warning and returns None when no matching annotation exists.
    Deletion is performed by posting a blanked-out edit, which is how the
    server removes an annotation record.
    """
    term_data = self.get_entity(term_ilx_id)
    if not term_data['id']:
        exit(
            'term_ilx_id: ' + term_ilx_id + ' does not exist'
        )
    anno_data = self.get_entity(annotation_type_ilx_id)
    if not anno_data['id']:
        exit(
            'annotation_type_ilx_id: ' + annotation_type_ilx_id +
            ' does not exist'
        )
    entity_annotations = self.get_annotation_via_tid(term_data['id'])
    annotation_id = ''
    # Find the one annotation whose (tid, annotation_tid, value) all match.
    for annotation in entity_annotations:
        if str(annotation['tid']) == str(term_data['id']):
            if str(annotation['annotation_tid']) == str(anno_data['id']):
                if str(annotation['value']) == str(annotation_value):
                    annotation_id = annotation['id']
                    break
    if not annotation_id:
        print('''WARNING: Annotation you wanted to delete does not exist ''')
        return None
    url = self.base_url + 'term/edit-annotation/{annotation_id}'.format(
        annotation_id = annotation_id
    )
    # Blank fields signal deletion on the server side.
    data = {
        'tid': ' ', # for delete
        'annotation_tid': ' ', # for delete
        'value': ' ', # for delete
        'term_version': ' ',
        'annotation_term_version': ' ',
    }
    output = self.post(
        url = url,
        data = data,
    )
    # check output
    return output
def get_relationship_via_tid(self, tid: str) -> dict:
    """ Fetch all relationships involving the entity with database id *tid* """
    url = self.base_url + f'term/get-relationships/{tid}?key={self.api_key}'
    return self.get(url)
def add_relationship(
        self,
        entity1_ilx: str,
        relationship_ilx: str,
        entity2_ilx: str) -> dict:
    """ Adds relationship connection in Interlex

    A relationship exists as 3 different parts:
        1. entity with type term, cde, fde, or pde
        2. entity with type relationship that connects entity1 to entity2
            -> Has its own meta data, so no value needed
        3. entity with type term, cde, fde, or pde

    If the exact relationship already exists, the existing record is
    returned instead of a duplicate being created.
    """
    url = self.base_url + 'term/add-relationship'
    # Resolve all three participants; bail out if any is missing.
    entity1_data = self.get_entity(entity1_ilx)
    if not entity1_data['id']:
        exit(
            'entity1_ilx: ' + entity1_ilx + ' does not exist'
        )
    relationship_data = self.get_entity(relationship_ilx)
    if not relationship_data['id']:
        exit(
            'relationship_ilx: ' + relationship_ilx + ' does not exist'
        )
    entity2_data = self.get_entity(entity2_ilx)
    if not entity2_data['id']:
        exit(
            'entity2_ilx: ' + entity2_ilx + ' does not exist'
        )
    data = {
        'term1_id': entity1_data['id'],
        'relationship_tid': relationship_data['id'],
        'term2_id': entity2_data['id'],
        'term1_version': entity1_data['version'],
        'term2_version': entity2_data['version'],
        'relationship_term_version': relationship_data['version'],
        'orig_uid': self.user_id, # BUG: php lacks orig_uid update
    }
    output = self.post(
        url = url,
        data = data,
    )
    ### If already exists, we return the actual relationship properly ###
    if output.get('errormsg'):
        if 'already exists' in output['errormsg'].lower():
            term_relationships = self.get_relationship_via_tid(entity1_data['id'])
            for term_relationship in term_relationships:
                if str(term_relationship['term2_id']) == str(entity2_data['id']):
                    if term_relationship['relationship_tid'] == relationship_data['id']:
                        print(
                            'relationship: [' + entity1_data['label'] + ' -> ' +
                            relationship_data['label'] + ' -> ' + entity2_data['label'] +
                            '], already exists.'
                        )
                        return term_relationship
            exit(output)
        exit(output)
    return output
|
tgbugs/ontquery | ontquery/terms.py | OntCuries.populate | python | def populate(cls, graph):
[graph.bind(k, v) for k, v in cls._dict.items()] | populate an rdflib graph with these curies | train | https://github.com/tgbugs/ontquery/blob/bcf4863cb2bf221afe2b093c5dc7da1377300041/ontquery/terms.py#L51-L53 | null | class OntCuries(metaclass=dictclass):
""" A bad implementation of a singleton dictionary based namespace.
Probably better to use metaclass= to init this so types can be tracked.
"""
# TODO how to set an OntCuries as the default...
def __new__(cls, *args, **kwargs):
#if not hasattr(cls, '_' + cls.__name__ + '_dict'):
if not hasattr(cls, '_dict'):
cls._dict = {}
cls._n_to_p = {}
cls._strie = {}
cls._trie = {}
for p, namespace in dict(*args, **kwargs).items():
sn = str(namespace)
trie.insert_trie(cls._trie, sn)
cls._dict[p] = sn
cls._n_to_p[sn] = p
if args or kwargs:
cls._pn = sorted(cls._dict.items(), key=lambda kv: len(kv[1]), reverse=True)
return cls._dict
@classmethod
@classmethod
def qname(cls, iri):
# while / is not *technically* allowed in prefix names by ttl
# RDFa and JSON-LD do allow it, so we are going to allow it too
# TODO cache the output mapping?
try:
namespace, suffix = trie.split_uri(iri)
except ValueError as e:
try:
namespace = str(iri)
prefix = cls._n_to_p[namespace]
return prefix + ':'
except KeyError as e:
return iri # can't split it then we're in trouble probably
if namespace not in cls._strie:
trie.insert_strie(cls._strie, cls._trie, namespace)
if cls._strie[namespace]:
pl_namespace = trie.get_longest_namespace(cls._strie[namespace], iri)
if pl_namespace is not None:
namespace = pl_namespace
suffix = iri[len(namespace):]
try:
prefix = cls._n_to_p[namespace]
return ':'.join((prefix, suffix))
except KeyError:
new_iri = namespace[:-1]
sep = namespace[-1]
qname = cls.qname(new_iri)
# this works because when we get to an unsplitable case we simply fail
# caching can help with performance here because common prefixes that
# have not been shortened will show up in the cache
return qname + sep + suffix
@classmethod
def _qname_old(cls, iri):
# sort in reverse to match longest matching namespace first TODO/FIXME trie
for prefix, namespace in cls._pn:
if iri.startswith(namespace):
suffix = iri[len(namespace):]
return ':'.join((prefix, suffix))
return iri
|
tgbugs/ontquery | ontquery/plugins/services.py | InterLexRemote.add_triple | python | def add_triple(self, subject, predicate, object):
def filter_ontid(ontid):
if ontid.startswith('http://'):
pass
elif ontid.prefix == 'ILXTEMP':
ontid = 'tmp_' + ontid.suffix
else:
ontid = 'ilx_' + ontid.suffix
return ontid
# this split between annotations and relationships is severely annoying
# because you have to know before hand which one it is (sigh)
s = OntId(subject)
p = OntId(predicate)
o = self._get_type(object)
if type(o) == str:
func = self.ilx_cli.add_annotation
elif type(o) == OntId:
func = self.ilx_cli.add_relationship
o = filter_ontid(o)
else:
raise TypeError(f'what are you giving me?! {object!r}')
s = filter_ontid(s)
p = filter_ontid(p)
resp = func(s, p, o)
return resp | Triple of curied or full iris to add to graph.
Subject should be an interlex | train | https://github.com/tgbugs/ontquery/blob/bcf4863cb2bf221afe2b093c5dc7da1377300041/ontquery/plugins/services.py#L468-L498 | [
"def _get_type(self, entity):\n try:\n return OntId(entity)\n except OntId.Error:\n return entity\n",
"def filter_ontid(ontid):\n if ontid.startswith('http://'):\n pass\n elif ontid.prefix == 'ILXTEMP':\n ontid = 'tmp_' + ontid.suffix\n else:\n ontid = 'ilx_' + ontid.suffix\n return ontid\n"
] | class InterLexRemote(OntService): # note to self
known_inverses = ('', ''),
defaultEndpoint = 'https://scicrunch.org/api/1/'
def __init__(self, *args, api_key=None, apiEndpoint=defaultEndpoint, host='uri.interlex.org', port='',
user_curies: dict={'ILX', 'http://uri.interlex.org/base/ilx_'}, # FIXME hardcoded
readonly=False,
**kwargs):
""" user_curies is a local curie mapping from prefix to a uri
This usually is a full http://uri.interlex.org/base/ilx_1234567 identifier """
if api_key is None:
import os
try:
self.api_key = os.environ.get('INTERLEX_API_KEY', os.environ.get('SCICRUNCH_API_KEY', None))
except KeyError:
self.api_key = None
if self.api_key is None and apiEndpoint == self.defaultEndpoint:
# we don't error here because API keys are not required for viewing
print('WARNING: You have not set an API key for the SciCrunch API!')
else:
self.api_key = api_key
self.apiEndpoint = apiEndpoint
try:
requests
except NameError:
raise ModuleNotFoundError('You need to install requests to use this service') from requests_missing
self.host = host
self.port = port
self.user_curies = user_curies
self.readonly = readonly
self._graph_cache = {}
self.Graph = rdflib.Graph
self.RDF = rdflib.RDF
self.OWL = rdflib.OWL
self.URIRef = rdflib.URIRef
#self.curies = requests.get(f'http://{self.host}:{self.port}/base/curies').json() # FIXME TODO
# here we see that the original model for curies doesn't quite hold up
# we need to accept local curies, but we also have to have them
# probably best to let the user populate their curies from interlex
# at the start, rather than having it be completely wild
# FIXME can't do this at the moment because interlex itself calls this --- WHOOPS
super().__init__(*args, **kwargs)
def setup(self):
OntCuries({'ILXTEMP':'http://uri.interlex.org/base/tmp_'})
if self.api_key is not None and self.apiEndpoint is not None:
self.ilx_cli = InterLexClient(api_key=self.api_key,
base_url=self.apiEndpoint,)
elif not self.readonly:
# expect attribute errors for ilx_cli
print('WARNING: You have not set an API key for the SciCrunch API! '
'InterLexRemote will error if you try to use it.')
super().setup()
@property
def host_port(self):
return f'{self.host}:{self.port}' if self.port else self.host
@property
def predicates(self):
return {} # TODO
def add_class(self,
subClassOf=None,
label=None,
definition=None,
synonyms=tuple(),
comment=None,
predicates: dict=None):
return self.add_entity('term', subClassOf, label, definition, synonyms, comment, predicates)
def add_pde(self,
label,
definition:str=None,
synonyms=tuple(),
comment: str=None,
predicates: dict=None):
return self.add_entity(
type = 'pde',
label = label,
subThingOf = None, # FIXME works for now
definition = definition,
synonyms = synonyms,
comment = comment,
predicates = predicates)
def add_predicates(self, ilx_curieoriri: str, predicate_objects_dict: dict) -> list:
tresp = []
if not ilx_curieoriri.startswith('http://uri.interlex.org/base/'): # FIXME: need formality
subject = 'http://uri.interlex.org/base/' + ilx_curieoriri
else:
subject = ilx_curieoriri
for predicate, objs in predicate_objects_dict.items():
if not isinstance(objs, list):
objs = [objs]
for object in objs:
# server output doesnt include their ILX IDs ... so it's not worth getting
tresp.append(self.add_triple(subject, predicate, object))
# TODO stick the responding predicates etc in if success
return tresp
def delete_predicates(self, ilx_curieoriri: str, predicate_objects_dict: dict) -> list:
tresp = []
if not ilx_curieoriri.startswith('http://uri.interlex.org/base/'): # FIXME: need formality
subject = 'http://uri.interlex.org/base/' + ilx_curieoriri
else:
subject = ilx_curieoriri
for predicate, objs in predicate_objects_dict.items():
if not isinstance(objs, list):
objs = [objs]
for object in objs:
# server output doesnt include their ILX IDs ... so it's not worth getting
tresp.append(self.delete_triple(subject, predicate, object))
# TODO stick the responding predicates etc in if success
return tresp
def add_entity(self, type, subThingOf, label, definition: str=None,
synonyms=tuple(), comment: str=None, predicates: dict=None):
if self.readonly:
raise exc.ReadOnlyError('InterLexRemote is in readonly mode.')
resp = self.ilx_cli.add_entity(
label = label,
type = type,
superclass = subThingOf,
definition = definition,
comment = comment,
synonyms = synonyms,
)
out_predicates = {}
if predicates:
tresp = self.add_predicates(ilx_curieoriri=resp['ilx'], predicate_objects_dict=predicates)
resp['annotations'] = tresp # TODO: Creates a record for annotations in term_versions table
if 'comment' in resp: # filtering of missing fields is done in the client
out_predicates['comment'] = resp['comment']
return QueryResult(
query_args = {},
iri=resp['iri'],
curie=resp['curie'],
label=resp['label'],
labels=tuple(),
#abbrev=None, # TODO
#acronym=None, # TODO
definition=resp.get('definition', None),
synonyms=tuple(resp.get('synonyms', tuple())),
#deprecated=None,
#prefix=None,
#category=None,
predicates=out_predicates,
#_graph=None,
source=self,
)
def update_entity(self, ilx_id: str=None, type: str=None, subThingOf: str=None, label: str=None,
definition: str=None, synonyms=tuple(), comment: str=None,
predicates_to_add: dict=None, predicates_to_delete: dict=None):
resp = self.ilx_cli.update_entity(
ilx_id = ilx_id,
label = label,
type = type,
superclass = subThingOf,
definition = definition,
comment = comment,
synonyms = synonyms,
# predicates = tresp,
)
tresp = None
if predicates_to_add:
trep = self.add_predicates(ilx_curieoriri=resp['ilx'], predicate_objects_dict=predicates_to_add)
tresp = None
if predicates_to_delete:
trep = self.delete_predicates(ilx_curieoriri=resp['ilx'], predicate_objects_dict=predicates_to_delete)
out_predicates = {}
if 'comment' in resp: # filtering of missing fields is done in the client
out_predicates['comment'] = resp['comment']
return QueryResult(
query_args = {},
iri=resp['iri'],
curie=resp['curie'],
label=resp['label'],
labels=tuple(),
#abbrev=None, # TODO
#acronym=None, # TODO
definition=resp['definition'],
synonyms=tuple(resp['synonyms']),
#deprecated=None,
#prefix=None,
#category=None,
predicates=out_predicates,
#_graph=None,
source=self,
)
def delete_triple(self, subject, predicate, object):
""" Triple of curied or full iris to add to graph.
Subject should be an interlex"""
def filter_ontid(ontid):
if ontid.startswith('http://'):
pass
elif ontid.prefix == 'ILXTEMP':
ontid = 'tmp_' + ontid.suffix
else:
ontid = 'ilx_' + ontid.suffix
return ontid
# this split between annotations and relationships is severely annoying
# because you have to know before hand which one it is (sigh)
s = OntId(subject)
p = OntId(predicate)
o = self._get_type(object)
if type(o) == str:
func = self.ilx_cli.delete_annotation
elif type(o) == OntId:
func = self.ilx_cli.delete_relationship
o = filter_ontid(o)
else:
raise TypeError(f'what are you giving me?! {object!r}')
s = filter_ontid(s)
p = filter_ontid(p)
# TODO: check if add_relationship works
resp = func(s, p, o)
return resp
def _get_type(self, entity):
    # Treat the value as an identifier when it parses as an OntId;
    # otherwise pass the raw value through (e.g. an annotation literal).
    try:
        return OntId(entity)
    except OntId.Error:
        return entity
def query(self, iri=None, curie=None, label=None, term=None, predicates=None, **_):
kwargs = cullNone(iri=iri, curie=curie, label=label, term=term, predicates=predicates)
def get(url, headers={'Accept':'application/n-triples'}): # FIXME extremely slow?
with requests.Session() as s:
s.headers.update(headers)
resp = s.get(url, allow_redirects=False)
while resp.is_redirect and resp.status_code < 400: # FIXME redirect loop issue
# using send means that our headers don't show up in every request
resp = s.get(resp.next.url, allow_redirects=False)
if not resp.is_redirect:
break
return resp
def isAbout(g):
ontid, *r1 = g[:self.RDF.type:self.OWL.Ontology]
o, *r2 = g[ontid:self.URIRef('http://purl.obolibrary.org/obo/IAO_0000136')]
if r1 or r2:
raise ValueError(f'NonUnique value for ontology {r1} or about {r2}')
return o
if iri:
oiri = OntId(iri=iri)
icurie = oiri.curie
if curie and icurie != curie:
raise ValueError(f'curie and curied iri do not match {curie} {icurie}')
else:
curie = icurie
elif curie:
iri = OntId(curie).iri
if curie:
if curie.startswith('ILX:') and iri:
# FIXME hack, can replace once the new resolver is up
url = iri.replace('uri.interlex.org', self.host_port)
else:
url = f'http://{self.host_port}/base/curies/{curie}?local=True'
elif label:
url = f'http://{self.host_port}/base/lexical/{label}'
else:
return None
if url in self._graph_cache:
graph = self._graph_cache[url]
if not graph:
return None
else:
resp = get(url)
if not resp.ok:
self._graph_cache[url] = None
return None
ttl = resp.content
graph = self.Graph().parse(data=ttl, format='turtle')
self._graph_cache[url] = graph
ia_iri = isAbout(graph)
rdll = rdflibLocal(graph)
if True:
#qrs = rdll.query(label=label, predicates=predicates, all_classes=True) # label=label issue?
qrs = rdll.query(predicates=predicates, all_classes=True)
qrd = {'predicates': {}} # FIXME iri can be none?
toskip = 'predicates',
if curie is None and iri is None:
i = OntId(ia_iri)
qrd['curie'] = i.curie
qrd['iri'] = i.iri
toskip += 'curie', 'iri'
if curie:
qrd['curie'] = curie
toskip += 'curie',
if iri:
qrd['iri'] = iri
toskip += 'iri',
for qr in qrs:
#print(tc.ltgreen(str(qr)))
# FIXME still last one wins behavior
n = {k:v for k, v in qr.items()
if k not in toskip
and v is not None}
qrd.update(n)
qrd['predicates'].update(cullNone(**qr['predicates']))
qrd['source'] = self
#print(tc.ltyellow(str(qrd)))
yield QueryResult(kwargs, **qrd)
else:
# TODO cases where ilx is preferred will be troublesome
maybe_out = [r for r in rdll.query(curie=curie, label=label, predicates=predicates)]
if maybe_out:
out = maybe_out
else:
out = rdll.query(iri=ia_iri, label=label, predicates=predicates)
if curie:
for qr in out:
qr = cullNone(**qr)
yield QueryResult(kwargs, #qr._QueryResult__query_args,
curie=curie,
**{k:v for k, v in qr.items()
if k != 'curie' })
return
yield from out
|
tgbugs/ontquery | ontquery/plugins/services.py | InterLexRemote.delete_triple | python | def delete_triple(self, subject, predicate, object):
def filter_ontid(ontid):
if ontid.startswith('http://'):
pass
elif ontid.prefix == 'ILXTEMP':
ontid = 'tmp_' + ontid.suffix
else:
ontid = 'ilx_' + ontid.suffix
return ontid
# this split between annotations and relationships is severely annoying
# because you have to know before hand which one it is (sigh)
s = OntId(subject)
p = OntId(predicate)
o = self._get_type(object)
if type(o) == str:
func = self.ilx_cli.delete_annotation
elif type(o) == OntId:
func = self.ilx_cli.delete_relationship
o = filter_ontid(o)
else:
raise TypeError(f'what are you giving me?! {object!r}')
s = filter_ontid(s)
p = filter_ontid(p)
# TODO: check if add_relationship works
resp = func(s, p, o)
return resp | Triple of curied or full iris to add to graph.
Subject should be an interlex | train | https://github.com/tgbugs/ontquery/blob/bcf4863cb2bf221afe2b093c5dc7da1377300041/ontquery/plugins/services.py#L500-L531 | [
"def _get_type(self, entity):\n try:\n return OntId(entity)\n except OntId.Error:\n return entity\n",
"def filter_ontid(ontid):\n if ontid.startswith('http://'):\n pass\n elif ontid.prefix == 'ILXTEMP':\n ontid = 'tmp_' + ontid.suffix\n else:\n ontid = 'ilx_' + ontid.suffix\n return ontid\n"
] | class InterLexRemote(OntService): # note to self
known_inverses = ('', ''),
defaultEndpoint = 'https://scicrunch.org/api/1/'
def __init__(self, *args, api_key=None, apiEndpoint=defaultEndpoint, host='uri.interlex.org', port='',
user_curies: dict={'ILX', 'http://uri.interlex.org/base/ilx_'}, # FIXME hardcoded
readonly=False,
**kwargs):
""" user_curies is a local curie mapping from prefix to a uri
This usually is a full http://uri.interlex.org/base/ilx_1234567 identifier """
if api_key is None:
import os
try:
self.api_key = os.environ.get('INTERLEX_API_KEY', os.environ.get('SCICRUNCH_API_KEY', None))
except KeyError:
self.api_key = None
if self.api_key is None and apiEndpoint == self.defaultEndpoint:
# we don't error here because API keys are not required for viewing
print('WARNING: You have not set an API key for the SciCrunch API!')
else:
self.api_key = api_key
self.apiEndpoint = apiEndpoint
try:
requests
except NameError:
raise ModuleNotFoundError('You need to install requests to use this service') from requests_missing
self.host = host
self.port = port
self.user_curies = user_curies
self.readonly = readonly
self._graph_cache = {}
self.Graph = rdflib.Graph
self.RDF = rdflib.RDF
self.OWL = rdflib.OWL
self.URIRef = rdflib.URIRef
#self.curies = requests.get(f'http://{self.host}:{self.port}/base/curies').json() # FIXME TODO
# here we see that the original model for curies doesn't quite hold up
# we need to accept local curies, but we also have to have them
# probably best to let the user populate their curies from interlex
# at the start, rather than having it be completely wild
# FIXME can't do this at the moment because interlex itself calls this --- WHOOPS
super().__init__(*args, **kwargs)
def setup(self):
OntCuries({'ILXTEMP':'http://uri.interlex.org/base/tmp_'})
if self.api_key is not None and self.apiEndpoint is not None:
self.ilx_cli = InterLexClient(api_key=self.api_key,
base_url=self.apiEndpoint,)
elif not self.readonly:
# expect attribute errors for ilx_cli
print('WARNING: You have not set an API key for the SciCrunch API! '
'InterLexRemote will error if you try to use it.')
super().setup()
@property
def host_port(self):
return f'{self.host}:{self.port}' if self.port else self.host
@property
def predicates(self):
return {} # TODO
def add_class(self,
subClassOf=None,
label=None,
definition=None,
synonyms=tuple(),
comment=None,
predicates: dict=None):
return self.add_entity('term', subClassOf, label, definition, synonyms, comment, predicates)
def add_pde(self,
label,
definition:str=None,
synonyms=tuple(),
comment: str=None,
predicates: dict=None):
return self.add_entity(
type = 'pde',
label = label,
subThingOf = None, # FIXME works for now
definition = definition,
synonyms = synonyms,
comment = comment,
predicates = predicates)
def add_predicates(self, ilx_curieoriri: str, predicate_objects_dict: dict) -> list:
tresp = []
if not ilx_curieoriri.startswith('http://uri.interlex.org/base/'): # FIXME: need formality
subject = 'http://uri.interlex.org/base/' + ilx_curieoriri
else:
subject = ilx_curieoriri
for predicate, objs in predicate_objects_dict.items():
if not isinstance(objs, list):
objs = [objs]
for object in objs:
# server output doesnt include their ILX IDs ... so it's not worth getting
tresp.append(self.add_triple(subject, predicate, object))
# TODO stick the responding predicates etc in if success
return tresp
def delete_predicates(self, ilx_curieoriri: str, predicate_objects_dict: dict) -> list:
tresp = []
if not ilx_curieoriri.startswith('http://uri.interlex.org/base/'): # FIXME: need formality
subject = 'http://uri.interlex.org/base/' + ilx_curieoriri
else:
subject = ilx_curieoriri
for predicate, objs in predicate_objects_dict.items():
if not isinstance(objs, list):
objs = [objs]
for object in objs:
# server output doesnt include their ILX IDs ... so it's not worth getting
tresp.append(self.delete_triple(subject, predicate, object))
# TODO stick the responding predicates etc in if success
return tresp
def add_entity(self, type, subThingOf, label, definition: str=None,
synonyms=tuple(), comment: str=None, predicates: dict=None):
if self.readonly:
raise exc.ReadOnlyError('InterLexRemote is in readonly mode.')
resp = self.ilx_cli.add_entity(
label = label,
type = type,
superclass = subThingOf,
definition = definition,
comment = comment,
synonyms = synonyms,
)
out_predicates = {}
if predicates:
tresp = self.add_predicates(ilx_curieoriri=resp['ilx'], predicate_objects_dict=predicates)
resp['annotations'] = tresp # TODO: Creates a record for annotations in term_versions table
if 'comment' in resp: # filtering of missing fields is done in the client
out_predicates['comment'] = resp['comment']
return QueryResult(
query_args = {},
iri=resp['iri'],
curie=resp['curie'],
label=resp['label'],
labels=tuple(),
#abbrev=None, # TODO
#acronym=None, # TODO
definition=resp.get('definition', None),
synonyms=tuple(resp.get('synonyms', tuple())),
#deprecated=None,
#prefix=None,
#category=None,
predicates=out_predicates,
#_graph=None,
source=self,
)
def update_entity(self, ilx_id: str=None, type: str=None, subThingOf: str=None, label: str=None,
definition: str=None, synonyms=tuple(), comment: str=None,
predicates_to_add: dict=None, predicates_to_delete: dict=None):
resp = self.ilx_cli.update_entity(
ilx_id = ilx_id,
label = label,
type = type,
superclass = subThingOf,
definition = definition,
comment = comment,
synonyms = synonyms,
# predicates = tresp,
)
tresp = None
if predicates_to_add:
trep = self.add_predicates(ilx_curieoriri=resp['ilx'], predicate_objects_dict=predicates_to_add)
tresp = None
if predicates_to_delete:
trep = self.delete_predicates(ilx_curieoriri=resp['ilx'], predicate_objects_dict=predicates_to_delete)
out_predicates = {}
if 'comment' in resp: # filtering of missing fields is done in the client
out_predicates['comment'] = resp['comment']
return QueryResult(
query_args = {},
iri=resp['iri'],
curie=resp['curie'],
label=resp['label'],
labels=tuple(),
#abbrev=None, # TODO
#acronym=None, # TODO
definition=resp['definition'],
synonyms=tuple(resp['synonyms']),
#deprecated=None,
#prefix=None,
#category=None,
predicates=out_predicates,
#_graph=None,
source=self,
)
def add_triple(self, subject, predicate, object):
""" Triple of curied or full iris to add to graph.
Subject should be an interlex"""
def filter_ontid(ontid):
if ontid.startswith('http://'):
pass
elif ontid.prefix == 'ILXTEMP':
ontid = 'tmp_' + ontid.suffix
else:
ontid = 'ilx_' + ontid.suffix
return ontid
# this split between annotations and relationships is severely annoying
# because you have to know before hand which one it is (sigh)
s = OntId(subject)
p = OntId(predicate)
o = self._get_type(object)
if type(o) == str:
func = self.ilx_cli.add_annotation
elif type(o) == OntId:
func = self.ilx_cli.add_relationship
o = filter_ontid(o)
else:
raise TypeError(f'what are you giving me?! {object!r}')
s = filter_ontid(s)
p = filter_ontid(p)
resp = func(s, p, o)
return resp
def _get_type(self, entity):
try:
return OntId(entity)
except OntId.Error:
return entity
def query(self, iri=None, curie=None, label=None, term=None, predicates=None, **_):
kwargs = cullNone(iri=iri, curie=curie, label=label, term=term, predicates=predicates)
def get(url, headers={'Accept':'application/n-triples'}): # FIXME extremely slow?
with requests.Session() as s:
s.headers.update(headers)
resp = s.get(url, allow_redirects=False)
while resp.is_redirect and resp.status_code < 400: # FIXME redirect loop issue
# using send means that our headers don't show up in every request
resp = s.get(resp.next.url, allow_redirects=False)
if not resp.is_redirect:
break
return resp
def isAbout(g):
ontid, *r1 = g[:self.RDF.type:self.OWL.Ontology]
o, *r2 = g[ontid:self.URIRef('http://purl.obolibrary.org/obo/IAO_0000136')]
if r1 or r2:
raise ValueError(f'NonUnique value for ontology {r1} or about {r2}')
return o
if iri:
oiri = OntId(iri=iri)
icurie = oiri.curie
if curie and icurie != curie:
raise ValueError(f'curie and curied iri do not match {curie} {icurie}')
else:
curie = icurie
elif curie:
iri = OntId(curie).iri
if curie:
if curie.startswith('ILX:') and iri:
# FIXME hack, can replace once the new resolver is up
url = iri.replace('uri.interlex.org', self.host_port)
else:
url = f'http://{self.host_port}/base/curies/{curie}?local=True'
elif label:
url = f'http://{self.host_port}/base/lexical/{label}'
else:
return None
if url in self._graph_cache:
graph = self._graph_cache[url]
if not graph:
return None
else:
resp = get(url)
if not resp.ok:
self._graph_cache[url] = None
return None
ttl = resp.content
graph = self.Graph().parse(data=ttl, format='turtle')
self._graph_cache[url] = graph
ia_iri = isAbout(graph)
rdll = rdflibLocal(graph)
if True:
#qrs = rdll.query(label=label, predicates=predicates, all_classes=True) # label=label issue?
qrs = rdll.query(predicates=predicates, all_classes=True)
qrd = {'predicates': {}} # FIXME iri can be none?
toskip = 'predicates',
if curie is None and iri is None:
i = OntId(ia_iri)
qrd['curie'] = i.curie
qrd['iri'] = i.iri
toskip += 'curie', 'iri'
if curie:
qrd['curie'] = curie
toskip += 'curie',
if iri:
qrd['iri'] = iri
toskip += 'iri',
for qr in qrs:
#print(tc.ltgreen(str(qr)))
# FIXME still last one wins behavior
n = {k:v for k, v in qr.items()
if k not in toskip
and v is not None}
qrd.update(n)
qrd['predicates'].update(cullNone(**qr['predicates']))
qrd['source'] = self
#print(tc.ltyellow(str(qrd)))
yield QueryResult(kwargs, **qrd)
else:
# TODO cases where ilx is preferred will be troublesome
maybe_out = [r for r in rdll.query(curie=curie, label=label, predicates=predicates)]
if maybe_out:
out = maybe_out
else:
out = rdll.query(iri=ia_iri, label=label, predicates=predicates)
if curie:
for qr in out:
qr = cullNone(**qr)
yield QueryResult(kwargs, #qr._QueryResult__query_args,
curie=curie,
**{k:v for k, v in qr.items()
if k != 'curie' })
return
yield from out
|
tgbugs/ontquery | ontquery/trie.py | insert_trie | python | def insert_trie(trie, value): # aka get_subtrie_or_insert
if value in trie:
return trie[value]
multi_check = False
for key in tuple(trie.keys()):
if len(value) > len(key) and value.startswith(key):
return insert_trie(trie[key], value)
elif key.startswith(value): # we know the value is not in the trie
if not multi_check:
trie[value] = {}
multi_check = True # there can be multiple longer existing prefixes
dict_ = trie.pop(key) # does not break strie since key<->dict_ remains unchanged
trie[value][key] = dict_
if value not in trie:
trie[value] = {}
return trie[value] | Insert a value into the trie if it is not already contained in the trie.
Return the subtree for the value regardless of whether it is a new value
or not. | train | https://github.com/tgbugs/ontquery/blob/bcf4863cb2bf221afe2b093c5dc7da1377300041/ontquery/trie.py#L31-L49 | [
"def insert_trie(trie, value): # aka get_subtrie_or_insert\n \"\"\" Insert a value into the trie if it is not already contained in the trie.\n Return the subtree for the value regardless of whether it is a new value\n or not. \"\"\"\n if value in trie:\n return trie[value]\n multi_check = False\n for key in tuple(trie.keys()):\n if len(value) > len(key) and value.startswith(key):\n return insert_trie(trie[key], value)\n elif key.startswith(value): # we know the value is not in the trie\n if not multi_check:\n trie[value] = {}\n multi_check = True # there can be multiple longer existing prefixes\n dict_ = trie.pop(key) # does not break strie since key<->dict_ remains unchanged\n trie[value][key] = dict_\n if value not in trie:\n trie[value] = {}\n return trie[value]\n"
] | from unicodedata import category
NAME_START_CATEGORIES = ["Ll", "Lu", "Lo", "Lt", "Nl"]
SPLIT_START_CATEGORIES = NAME_START_CATEGORIES + ['Nd']
NAME_CATEGORIES = NAME_START_CATEGORIES + ["Mc", "Me", "Mn", "Lm", "Nd"]
ALLOWED_NAME_CHARS = ["\u00B7", "\u0387", "-", ".", "_", ":"]
XMLNS = "http://www.w3.org/XML/1998/namespace"
def split_uri(uri, split_start=SPLIT_START_CATEGORIES):
if uri.startswith(XMLNS):
return (XMLNS, uri.split(XMLNS)[1])
length = len(uri)
for i in range(0, length):
c = uri[-i - 1]
if not category(c) in NAME_CATEGORIES:
if c in ALLOWED_NAME_CHARS:
continue
for j in range(-1 - i, length):
if category(uri[j]) in split_start or uri[j] == "_":
# _ prevents early split, roundtrip not generate
ns = uri[:j]
if not ns:
break
ln = uri[j:]
return (ns, ln)
break
raise ValueError("Can't split '{}'".format(uri))
def insert_strie(strie, trie, value):
if value not in strie:
strie[value] = insert_trie(trie, value)
def get_longest_namespace(trie, value):
for key in trie:
if value.startswith(key):
out = get_longest_namespace(trie[key], value)
if out is None:
return key
else:
return out
return None
|
baguette-io/baguette-messaging | farine/connectors/sql/__init__.py | setup | python | def setup(settings):
connector = settings.get('db_connector')
if connector == 'postgres':
from playhouse.pool import PooledPostgresqlExtDatabase
return PooledPostgresqlExtDatabase(settings['db_name'],
user=settings['db_user'],
password=settings['db_password'],
host=settings['db_host'],
port=settings.get('db_port'),
max_connections=settings.get('db_max_conn'),
stale_timeout=settings.get('db_stale_timeout'),
timeout=settings.get('db_timeout'),
register_hstore=False) | Setup the database connection. | train | https://github.com/baguette-io/baguette-messaging/blob/8d1c4707ea7eace8617fed2d97df2fcc9d0cdee1/farine/connectors/sql/__init__.py#L31-L46 | null | #-*- coding:utf-8 -*-
import json
import farine.discovery
import sel.serializers
from peewee import Model as PModel
from peewee import *
from playhouse.shortcuts import model_to_dict
try:
from playhouse.postgres_ext import *
except ImportError:
pass
class Model(PModel):
"""
Base model that extends the peewee one.
"""
def to_json(self, extras=None):
"""
Convert a model into a json using the playhouse shortcut.
"""
extras = extras or {}
to_dict = model_to_dict(self)
to_dict.update(extras)
return json.dumps(to_dict, cls=sel.serializers.JsonEncoder)
class Meta:
database = None
def init(module, db):
"""
Initialize the models.
"""
for model in farine.discovery.import_models(module):
model._meta.database = db
|
baguette-io/baguette-messaging | farine/connectors/sql/__init__.py | init | python | def init(module, db):
for model in farine.discovery.import_models(module):
model._meta.database = db | Initialize the models. | train | https://github.com/baguette-io/baguette-messaging/blob/8d1c4707ea7eace8617fed2d97df2fcc9d0cdee1/farine/connectors/sql/__init__.py#L48-L53 | null | #-*- coding:utf-8 -*-
import json
import farine.discovery
import sel.serializers
from peewee import Model as PModel
from peewee import *
from playhouse.shortcuts import model_to_dict
try:
from playhouse.postgres_ext import *
except ImportError:
pass
class Model(PModel):
"""
Base model that extends the peewee one.
"""
def to_json(self, extras=None):
"""
Convert a model into a json using the playhouse shortcut.
"""
extras = extras or {}
to_dict = model_to_dict(self)
to_dict.update(extras)
return json.dumps(to_dict, cls=sel.serializers.JsonEncoder)
class Meta:
database = None
def setup(settings):
"""
Setup the database connection.
"""
connector = settings.get('db_connector')
if connector == 'postgres':
from playhouse.pool import PooledPostgresqlExtDatabase
return PooledPostgresqlExtDatabase(settings['db_name'],
user=settings['db_user'],
password=settings['db_password'],
host=settings['db_host'],
port=settings.get('db_port'),
max_connections=settings.get('db_max_conn'),
stale_timeout=settings.get('db_stale_timeout'),
timeout=settings.get('db_timeout'),
register_hstore=False)
|
baguette-io/baguette-messaging | farine/connectors/sql/__init__.py | Model.to_json | python | def to_json(self, extras=None):
extras = extras or {}
to_dict = model_to_dict(self)
to_dict.update(extras)
return json.dumps(to_dict, cls=sel.serializers.JsonEncoder) | Convert a model into a json using the playhouse shortcut. | train | https://github.com/baguette-io/baguette-messaging/blob/8d1c4707ea7eace8617fed2d97df2fcc9d0cdee1/farine/connectors/sql/__init__.py#L19-L26 | null | class Model(PModel):
"""
Base model that extends the peewee one.
"""
|
baguette-io/baguette-messaging | farine/stream/sse.py | SSEConsumer.run | python | def run(self, limit=None, timeout=None):
counter = 0
self.stream = sseclient.SSEClient(self.endpoint)
while True:
with utils.Timeout(timeout):
try:
event = next(self.stream)
except StopIteration:
continue
else:
if not event.data:
continue
self.main_callback(event.data)
counter += 1
if limit and counter >= limit:
return | Consume the event stream.
:param timeout: Duration of the connection timeout.
:type timeout: int
:param limit: Number of events to consume.
:type limit: int
:rtype: None | train | https://github.com/baguette-io/baguette-messaging/blob/8d1c4707ea7eace8617fed2d97df2fcc9d0cdee1/farine/stream/sse.py#L40-L63 | [
"def main_callback(self, *args, **kwargs):\n \"\"\"\n Main callback called when an event is received from an entry point.\n\n :returns: The entry point's callback.\n :rtype: function\n :raises NotImplementedError: When the entrypoint doesn't have the required attributes.\n \"\"\"\n if not self.callback:\n raise NotImplementedError('Entrypoints must declare `callback`')\n if not self.settings:\n raise NotImplementedError('Entrypoints must declare `settings`')\n\n self.callback.im_self.db = None\n\n #1. Start all the middlewares\n with self.debug(*args, **kwargs):\n with self.database():\n #2. `Real` callback\n result = self.callback(*args, **kwargs)#pylint: disable=not-callable\n return result\n"
] | class SSEConsumer(EntryPointMixin):
"""
HTTP SSE consumer.
"""
def __init__(self, *args, **kwargs):#pylint:disable=unused-argument
"""
:param service: The service's name which consume the stream.
:type service: str
:param callback: The callback to call when receiving a message.
:type callback: object
:rtype: None
"""
self.callback = kwargs.get('callback')
self.service = kwargs.get('service')
self.settings = getattr(farine.settings, self.service)
self.endpoint = kwargs.get('endpoint', self.settings['endpoint'])
self.headers = kwargs.get('headers', {'Accept':'text/event-stream'})
self.stream = None
@contextlib.contextmanager
def debug(self, data):#pylint:disable=arguments-differ,unused-argument
"""
Add a debug method.
"""
yield
def start(self, *args, **kwargs):#pylint:disable=unused-argument
"""
| Launch the SSE consumer.
| It can listen forever for messages or just wait for one.
:param limit: If set, the consumer listens for a limited number of events.
:type limit: int
:param timeout: If set, the consumer listens for an event for a limited time.
:type timeout: int
:rtype: None
"""
limit = kwargs.get('limit', None)
timeout = kwargs.get('timeout', None)
self.run(limit=limit, timeout=timeout)
def stop(self):
"""
Stop the consumer.
:rtype: None
"""
|
baguette-io/baguette-messaging | farine/stream/sse.py | SSEConsumer.start | python | def start(self, *args, **kwargs):#pylint:disable=unused-argument
limit = kwargs.get('limit', None)
timeout = kwargs.get('timeout', None)
self.run(limit=limit, timeout=timeout) | | Launch the SSE consumer.
| It can listen forever for messages or just wait for one.
:param limit: If set, the consumer listens for a limited number of events.
:type limit: int
:param timeout: If set, the consumer listens for an event for a limited time.
:type timeout: int
:rtype: None | train | https://github.com/baguette-io/baguette-messaging/blob/8d1c4707ea7eace8617fed2d97df2fcc9d0cdee1/farine/stream/sse.py#L65-L78 | [
"def run(self, limit=None, timeout=None):\n \"\"\"\n Consume the event stream.\n :param timeout: Duration of the connection timeout.\n :type timeout: int\n :param limit: Number of events to consume.\n :type limit: int\n :rtype: None\n \"\"\"\n counter = 0\n self.stream = sseclient.SSEClient(self.endpoint)\n while True:\n with utils.Timeout(timeout):\n try:\n event = next(self.stream)\n except StopIteration:\n continue\n else:\n if not event.data:\n continue\n self.main_callback(event.data)\n counter += 1\n if limit and counter >= limit:\n return\n"
] | class SSEConsumer(EntryPointMixin):
"""
HTTP SSE consumer.
"""
def __init__(self, *args, **kwargs):#pylint:disable=unused-argument
"""
:param service: The service's name which consume the stream.
:type service: str
:param callback: The callback to call when receiving a message.
:type callback: object
:rtype: None
"""
self.callback = kwargs.get('callback')
self.service = kwargs.get('service')
self.settings = getattr(farine.settings, self.service)
self.endpoint = kwargs.get('endpoint', self.settings['endpoint'])
self.headers = kwargs.get('headers', {'Accept':'text/event-stream'})
self.stream = None
@contextlib.contextmanager
def debug(self, data):#pylint:disable=arguments-differ,unused-argument
"""
Add a debug method.
"""
yield
def run(self, limit=None, timeout=None):
"""
Consume the event stream.
:param timeout: Duration of the connection timeout.
:type timeout: int
:param limit: Number of events to consume.
:type limit: int
:rtype: None
"""
counter = 0
self.stream = sseclient.SSEClient(self.endpoint)
while True:
with utils.Timeout(timeout):
try:
event = next(self.stream)
except StopIteration:
continue
else:
if not event.data:
continue
self.main_callback(event.data)
counter += 1
if limit and counter >= limit:
return
def stop(self):
"""
Stop the consumer.
:rtype: None
"""
|
baguette-io/baguette-messaging | farine/amqp/consumer.py | Consumer.get_consumers | python | def get_consumers(self, _Consumer, channel):
return [_Consumer(queues=[self.queue(channel)], callbacks=[self.main_callback], prefetch_count=self.prefetch_count)] | | ConsumerMixin requirement.
| Get the consumers list.
:returns: All the consumers.
:rtype: list. | train | https://github.com/baguette-io/baguette-messaging/blob/8d1c4707ea7eace8617fed2d97df2fcc9d0cdee1/farine/amqp/consumer.py#L71-L79 | null | class Consumer(ConsumerMixin, EntryPointMixin):
"""
Consumer generic class.
"""
prefetch_count = None
exclusive = False
auto_delete = False
auto_generated = False
routing_key_format = None
def __init__(self, *args, **kwargs):#pylint:disable=unused-argument
"""
:param service: The service's name which consume.
:type service: str
:param queue_name: The name of queue. Optional, default to `service` value.
:type queue_name: None, str
:param exhange: The exchange's name to consume from.
:type exchange: str
:param exchange_type: The exchange's type. Default to 'direct'.
:type exchange_type: str
:param routing_key: The routing key to read the message.
:type routing_key: str
:param callback: The callback to call when receiving a message.
:type callback: object
:rtype: None
"""
self.service = kwargs.get('service')
kwargs.setdefault('exchange', self.service)
if self.auto_generated:
self.queue_name = self.routing_key = uuid.uuid4().hex
elif self.routing_key_format:
self.queue_name = self.routing_key = self.routing_key_format.format(**kwargs)
else:
self.queue_name = self.routing_key = kwargs.get('routing_key') or self.service
self.settings = getattr(farine.settings, self.service)
if not self.callback:
self.callback = kwargs.get('callback')
exchange_type = kwargs.pop('exchange_type', 'direct')
self.exchange = Exchange(kwargs.pop('exchange'),
type=exchange_type,
durable=self.settings['durable'],
auto_declare=self.settings['auto_declare'],
delivery_mode=self.settings['delivery_mode'])
self.queue = Queue(self.queue_name,
exchange=self.exchange,
routing_key=self.routing_key,
exclusive=self.settings.get('exclusive', self.exclusive),
auto_delete=self.settings.get('auto_delete', self.auto_delete),
durable=self.settings['durable'],
auto_declare=self.settings['auto_declare'])
self.connection = Connection(self.settings['amqp_uri'])
@contextlib.contextmanager
def debug(self, body, message):#pylint:disable=arguments-differ,unused-argument
"""
| Rewrite/move some code?
| EntryPointMixin requirement. Context Manager.
| Check if debug is enabled for the message consumed.
| If so will run cProfile for this message, and send the result
| to the `__debug__` queue of the exchange.
:param callback: the callback of the message consumed.
:type callback: function
:rtype: None
"""
is_debug = body.get('__debug__', False) if isinstance(body, dict) else False
if not is_debug:
yield
else:
#Start the profiler
result = StringIO.StringIO()
profiler = cProfile.Profile()
profiler.enable()
yield
#Close the profiler
profiler.disable()
profiler.create_stats()
stats = pstats.Stats(profiler, stream=result)
stats.strip_dirs().print_stats()
debug_message = body.copy()
debug_message['__debug__'] = result.getvalue()
#Send the result to the exchange
with producers[self.connection].acquire(block=True) as producer:
debug_queue = Queue('debug',
exchange=self.exchange,
routing_key='debug',
durable=True,
auto_declare=True)
producer.maybe_declare(debug_queue)
producer.publish(debug_message,
exchange=self.exchange,
declare=[self.exchange],
routing_key='debug')
def start(self, *args, **kwargs):#pylint:disable=unused-argument
"""
| Launch the consumer.
| It can listen forever for messages or just wait for one.
:param forever: If set, the consumer listens forever. Default to `True`.
:type forever: bool
:param timeout: If set, the consumer waits the specified seconds before quitting.
:type timeout: None, int
:rtype: None
:raises socket.timeout: when no message has been received since `timeout`.
"""
forever = kwargs.get('forever', True)
timeout = kwargs.get('timeout', None)
if forever:
return self.run(timeout=timeout)
elif timeout:
next((self.consume(timeout=timeout)), None)
else:
next((self.consume(limit=1, timeout=timeout)), None)
def stop(self):
"""
Stop to consume.
:rtype: None
"""
self.should_stop = True
self.connection.release()
|
baguette-io/baguette-messaging | farine/amqp/consumer.py | Consumer.debug | python | def debug(self, body, message):#pylint:disable=arguments-differ,unused-argument
is_debug = body.get('__debug__', False) if isinstance(body, dict) else False
if not is_debug:
yield
else:
#Start the profiler
result = StringIO.StringIO()
profiler = cProfile.Profile()
profiler.enable()
yield
#Close the profiler
profiler.disable()
profiler.create_stats()
stats = pstats.Stats(profiler, stream=result)
stats.strip_dirs().print_stats()
debug_message = body.copy()
debug_message['__debug__'] = result.getvalue()
#Send the result to the exchange
with producers[self.connection].acquire(block=True) as producer:
debug_queue = Queue('debug',
exchange=self.exchange,
routing_key='debug',
durable=True,
auto_declare=True)
producer.maybe_declare(debug_queue)
producer.publish(debug_message,
exchange=self.exchange,
declare=[self.exchange],
routing_key='debug') | | Rewrite/move some code?
| EntryPointMixin requirement. Context Manager.
| Check if debug is enabled for the message consumed.
| If so will run cProfile for this message, and send the result
| to the `__debug__` queue of the exchange.
:param callback: the callback of the message consumed.
:type callback: function
:rtype: None | train | https://github.com/baguette-io/baguette-messaging/blob/8d1c4707ea7eace8617fed2d97df2fcc9d0cdee1/farine/amqp/consumer.py#L83-L122 | null | class Consumer(ConsumerMixin, EntryPointMixin):
"""
Consumer generic class.
"""
prefetch_count = None
exclusive = False
auto_delete = False
auto_generated = False
routing_key_format = None
def __init__(self, *args, **kwargs):#pylint:disable=unused-argument
"""
:param service: The service's name which consume.
:type service: str
:param queue_name: The name of queue. Optional, default to `service` value.
:type queue_name: None, str
:param exhange: The exchange's name to consume from.
:type exchange: str
:param exchange_type: The exchange's type. Default to 'direct'.
:type exchange_type: str
:param routing_key: The routing key to read the message.
:type routing_key: str
:param callback: The callback to call when receiving a message.
:type callback: object
:rtype: None
"""
self.service = kwargs.get('service')
kwargs.setdefault('exchange', self.service)
if self.auto_generated:
self.queue_name = self.routing_key = uuid.uuid4().hex
elif self.routing_key_format:
self.queue_name = self.routing_key = self.routing_key_format.format(**kwargs)
else:
self.queue_name = self.routing_key = kwargs.get('routing_key') or self.service
self.settings = getattr(farine.settings, self.service)
if not self.callback:
self.callback = kwargs.get('callback')
exchange_type = kwargs.pop('exchange_type', 'direct')
self.exchange = Exchange(kwargs.pop('exchange'),
type=exchange_type,
durable=self.settings['durable'],
auto_declare=self.settings['auto_declare'],
delivery_mode=self.settings['delivery_mode'])
self.queue = Queue(self.queue_name,
exchange=self.exchange,
routing_key=self.routing_key,
exclusive=self.settings.get('exclusive', self.exclusive),
auto_delete=self.settings.get('auto_delete', self.auto_delete),
durable=self.settings['durable'],
auto_declare=self.settings['auto_declare'])
self.connection = Connection(self.settings['amqp_uri'])
def get_consumers(self, _Consumer, channel):
"""
| ConsumerMixin requirement.
| Get the consumers list.
:returns: All the consumers.
:rtype: list.
"""
return [_Consumer(queues=[self.queue(channel)], callbacks=[self.main_callback], prefetch_count=self.prefetch_count)]
@contextlib.contextmanager
def start(self, *args, **kwargs):#pylint:disable=unused-argument
"""
| Launch the consumer.
| It can listen forever for messages or just wait for one.
:param forever: If set, the consumer listens forever. Default to `True`.
:type forever: bool
:param timeout: If set, the consumer waits the specified seconds before quitting.
:type timeout: None, int
:rtype: None
:raises socket.timeout: when no message has been received since `timeout`.
"""
forever = kwargs.get('forever', True)
timeout = kwargs.get('timeout', None)
if forever:
return self.run(timeout=timeout)
elif timeout:
next((self.consume(timeout=timeout)), None)
else:
next((self.consume(limit=1, timeout=timeout)), None)
def stop(self):
"""
Stop to consume.
:rtype: None
"""
self.should_stop = True
self.connection.release()
|
baguette-io/baguette-messaging | farine/amqp/consumer.py | Consumer.start | python | def start(self, *args, **kwargs):#pylint:disable=unused-argument
forever = kwargs.get('forever', True)
timeout = kwargs.get('timeout', None)
if forever:
return self.run(timeout=timeout)
elif timeout:
next((self.consume(timeout=timeout)), None)
else:
next((self.consume(limit=1, timeout=timeout)), None) | | Launch the consumer.
| It can listen forever for messages or just wait for one.
:param forever: If set, the consumer listens forever. Default to `True`.
:type forever: bool
:param timeout: If set, the consumer waits the specified seconds before quitting.
:type timeout: None, int
:rtype: None
:raises socket.timeout: when no message has been received since `timeout`. | train | https://github.com/baguette-io/baguette-messaging/blob/8d1c4707ea7eace8617fed2d97df2fcc9d0cdee1/farine/amqp/consumer.py#L125-L144 | null | class Consumer(ConsumerMixin, EntryPointMixin):
"""
Consumer generic class.
"""
prefetch_count = None
exclusive = False
auto_delete = False
auto_generated = False
routing_key_format = None
def __init__(self, *args, **kwargs):#pylint:disable=unused-argument
"""
:param service: The service's name which consume.
:type service: str
:param queue_name: The name of queue. Optional, default to `service` value.
:type queue_name: None, str
:param exhange: The exchange's name to consume from.
:type exchange: str
:param exchange_type: The exchange's type. Default to 'direct'.
:type exchange_type: str
:param routing_key: The routing key to read the message.
:type routing_key: str
:param callback: The callback to call when receiving a message.
:type callback: object
:rtype: None
"""
self.service = kwargs.get('service')
kwargs.setdefault('exchange', self.service)
if self.auto_generated:
self.queue_name = self.routing_key = uuid.uuid4().hex
elif self.routing_key_format:
self.queue_name = self.routing_key = self.routing_key_format.format(**kwargs)
else:
self.queue_name = self.routing_key = kwargs.get('routing_key') or self.service
self.settings = getattr(farine.settings, self.service)
if not self.callback:
self.callback = kwargs.get('callback')
exchange_type = kwargs.pop('exchange_type', 'direct')
self.exchange = Exchange(kwargs.pop('exchange'),
type=exchange_type,
durable=self.settings['durable'],
auto_declare=self.settings['auto_declare'],
delivery_mode=self.settings['delivery_mode'])
self.queue = Queue(self.queue_name,
exchange=self.exchange,
routing_key=self.routing_key,
exclusive=self.settings.get('exclusive', self.exclusive),
auto_delete=self.settings.get('auto_delete', self.auto_delete),
durable=self.settings['durable'],
auto_declare=self.settings['auto_declare'])
self.connection = Connection(self.settings['amqp_uri'])
def get_consumers(self, _Consumer, channel):
"""
| ConsumerMixin requirement.
| Get the consumers list.
:returns: All the consumers.
:rtype: list.
"""
return [_Consumer(queues=[self.queue(channel)], callbacks=[self.main_callback], prefetch_count=self.prefetch_count)]
@contextlib.contextmanager
def debug(self, body, message):#pylint:disable=arguments-differ,unused-argument
"""
| Rewrite/move some code?
| EntryPointMixin requirement. Context Manager.
| Check if debug is enabled for the message consumed.
| If so will run cProfile for this message, and send the result
| to the `__debug__` queue of the exchange.
:param callback: the callback of the message consumed.
:type callback: function
:rtype: None
"""
is_debug = body.get('__debug__', False) if isinstance(body, dict) else False
if not is_debug:
yield
else:
#Start the profiler
result = StringIO.StringIO()
profiler = cProfile.Profile()
profiler.enable()
yield
#Close the profiler
profiler.disable()
profiler.create_stats()
stats = pstats.Stats(profiler, stream=result)
stats.strip_dirs().print_stats()
debug_message = body.copy()
debug_message['__debug__'] = result.getvalue()
#Send the result to the exchange
with producers[self.connection].acquire(block=True) as producer:
debug_queue = Queue('debug',
exchange=self.exchange,
routing_key='debug',
durable=True,
auto_declare=True)
producer.maybe_declare(debug_queue)
producer.publish(debug_message,
exchange=self.exchange,
declare=[self.exchange],
routing_key='debug')
def stop(self):
"""
Stop to consume.
:rtype: None
"""
self.should_stop = True
self.connection.release()
|
baguette-io/baguette-messaging | farine/amqp/publisher.py | Publisher.send | python | def send(self, message, *args, **kwargs):
routing_keys = kwargs.get('routing_key') or self.routing_key
routing_keys = [routing_keys] if isinstance(routing_keys, basestring) else routing_keys
correlation_id = kwargs.get('correlation_id', None)
reply_to = kwargs.get('reply_to', None)
declare=[self.exchange] + kwargs.get('declare', [])
conn = self.get_connection()
with connections[conn].acquire(block=True) as connection:
self.exchange.maybe_bind(connection)
#reply_to.maybe_bind(connection)
#reply_to.declare(True)
with producers[connection].acquire(block=True) as producer:
for routing_key in routing_keys:
LOGGER.info('Send message %s to exchange %s with routing_key %s reply_to %s correlation_id %s',
message, self.exchange.name, routing_key, reply_to, correlation_id)
producer.publish(
message,
exchange=self.exchange,
declare=declare,
serializer=self.settings['serializer'],
routing_key=routing_key,
correlation_id=correlation_id,
retry=self.settings['retry'],
delivery_mode=self.settings['delivery_mode'],
reply_to=reply_to,
retry_policy=self.settings['retry_policy']) | Send the the `message` to the broker.
:param message: The message to send. Its type depends on the serializer used.
:type message: object
:rtype: None | train | https://github.com/baguette-io/baguette-messaging/blob/8d1c4707ea7eace8617fed2d97df2fcc9d0cdee1/farine/amqp/publisher.py#L47-L79 | [
"def get_connection(self):\n \"\"\"\n Retrieve the connection, lazily.\n\n :returns: The broker connection.\n :rtype: kombu.connection.Connection\n \"\"\"\n return Connection(self.settings['amqp_uri'])\n"
] | class Publisher(object):
"""
Publisher generic class. Also known as Producer.
"""
auto_delete = False
def __init__(self, name, routing_key, service=None):
"""
Messages will be published to `exchange`, using these different settings.
:param name: The exchange name, required.
:type name: str
:param routing_key: The key used to route the message.
:type routing_key: None, str
:param service: The service's name. Used to get the configuration
:type service: None, str
:rtype: None
"""
self.settings = getattr(farine.settings, service or name)
self.routing_key = routing_key
self.exchange = Exchange(name, type=self.settings['type'],
durable=self.settings['durable'],
auto_delete=self.settings.get('auto_delete', self.auto_delete),
delivery_mode=self.settings['delivery_mode'])
def get_connection(self):
"""
Retrieve the connection, lazily.
:returns: The broker connection.
:rtype: kombu.connection.Connection
"""
return Connection(self.settings['amqp_uri'])
def __call__(self, *args, **kwargs):
return self.send(*args, **kwargs)
def close(self):
"""
Release the connection to the broker.
:rtype: None
"""
self.get_connection().release()
|
baguette-io/baguette-messaging | farine/main.py | main | python | def main(module=None):
if not module:
#Parsing
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--start', type=str, help='Start the module', required=True,
dest='module')
args = parser.parse_args()
module = args.module
#1. Load settings
farine.settings.load()
#2. Load the module
farine.discovery.import_module(module)
#3. Start the module
farine.discovery.start() | Entry point. | train | https://github.com/baguette-io/baguette-messaging/blob/8d1c4707ea7eace8617fed2d97df2fcc9d0cdee1/farine/main.py#L15-L31 | [
"def start():\n \"\"\"\n | Start all the registered entrypoints\n | that have been added to `ENTRYPOINTS`.\n\n :rtype: None\n \"\"\"\n pool = gevent.threadpool.ThreadPool(len(ENTRYPOINTS))\n for entrypoint, callback, args, kwargs in ENTRYPOINTS:\n cname = callback.__name__\n #1. Retrieve the class which owns the callback\n for name, klass in inspect.getmembers(sys.modules[callback.__module__], inspect.isclass):\n if hasattr(klass, cname):\n service_name = name.lower()\n break\n #2.Start the entrypoint\n callback = getattr(klass(), cname)\n kwargs.update({'service':service_name, 'callback':callback, 'callback_name': cname})\n LOGGER.info('Start service %s[%s].', service_name.capitalize(), cname)\n obj = entrypoint(*args, **kwargs)\n pool.spawn(obj.start, *args, **kwargs)\n pool.join()\n return True\n",
"def import_module(module):\n \"\"\"\n | Given a module `service`, try to import it.\n | It will autodiscovers all the entrypoints\n | and add them in `ENTRYPOINTS`.\n\n :param module: The module's name to import.\n :type module: str\n :rtype: None\n :raises ImportError: When the service/module to start is not found.\n \"\"\"\n try:\n __import__('{0}.service'.format(module))\n except ImportError:\n LOGGER.error('No module/service found. Quit.')\n sys.exit(0)\n",
"def load():\n \"\"\"\n | Load the configuration file.\n | Add dynamically configuration to the module.\n\n :rtype: None\n \"\"\"\n config = ConfigParser.RawConfigParser(DEFAULTS)\n config.readfp(open(CONF_PATH))\n for section in config.sections():\n globals()[section] = {}\n for key, val in config.items(section):\n globals()[section][key] = val\n"
] | #-*- coding:utf-8 -*-
"""
Entrypoint of the package.
Can start a module here.
"""
import argparse
import kombu.log
import farine.discovery
import farine.settings
import farine.log
farine.log.setup_logging(__name__)
kombu.log.setup_logging()
|
baguette-io/baguette-messaging | farine/log/__init__.py | setup_logging | python | def setup_logging(app, disable_existing_loggers=True):
conf = yaml.load(open(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'logging.yaml'), 'r'))
conf['disable_existing_loggers'] = disable_existing_loggers
conf['loggers'][app] = conf['loggers'].pop('__name__')
logging.config.dictConfig(conf) | Setup the logging using logging.yaml.
:param app: The app which setups the logging. Used for the log's filename and for the log's name.
:type app: str
:param disable_existing_loggers: If False, loggers which exist when this call is made are left enabled.
:type disable_existing_loggers: bool
:returns: None | train | https://github.com/baguette-io/baguette-messaging/blob/8d1c4707ea7eace8617fed2d97df2fcc9d0cdee1/farine/log/__init__.py#L10-L22 | null | #-*- coding:utf-8 -*-
"""
Module which setup the logging using a yaml file.
"""
import logging
import logging.config
import os
import yaml
|
baguette-io/baguette-messaging | farine/execute/method.py | Method.start | python | def start(self, *args, **kwargs):#pylint:disable=unused-argument
restart = kwargs.get('restart', True)
return self.run(restart) | Launch the method.
:param restart: Restart the method if it ends.
:type restart: bool
:rtype: None | train | https://github.com/baguette-io/baguette-messaging/blob/8d1c4707ea7eace8617fed2d97df2fcc9d0cdee1/farine/execute/method.py#L46-L54 | [
"def run(self, restart):\n \"\"\"\n Execute the method.\n :param restart: Restart the method if it ends.\n :type restart: bool\n :rtype: None\n \"\"\"\n while True:\n result = self.main_callback()\n if not restart:\n break\n return result\n"
] | class Method(EntryPointMixin):
"""
Execute the method.
"""
def __init__(self, *args, **kwargs):#pylint:disable=unused-argument
"""
:param service: The service's name which execute the method.
:type service: str
:param callback: The method to call.
:type callback: object
:rtype: None
"""
self.callback = kwargs.get('callback')
self.service = kwargs.get('service')
self.settings = getattr(farine.settings, self.service)
@contextlib.contextmanager
def debug(self):#pylint:disable=arguments-differ,unused-argument
"""
Add a debug method.
"""
yield
def run(self, restart):
"""
Execute the method.
:param restart: Restart the method if it ends.
:type restart: bool
:rtype: None
"""
while True:
result = self.main_callback()
if not restart:
break
return result
def stop(self):
"""
Stop the execution.
:rtype: None
"""
|
baguette-io/baguette-messaging | farine/settings.py | load | python | def load():
config = ConfigParser.RawConfigParser(DEFAULTS)
config.readfp(open(CONF_PATH))
for section in config.sections():
globals()[section] = {}
for key, val in config.items(section):
globals()[section][key] = val | | Load the configuration file.
| Add dynamically configuration to the module.
:rtype: None | train | https://github.com/baguette-io/baguette-messaging/blob/8d1c4707ea7eace8617fed2d97df2fcc9d0cdee1/farine/settings.py#L36-L48 | null | #-*- coding:utf-8 -*-
"""
Module containing the configuration.
To load the configuration, call the `load()` function.
By default the configuration's filepath */etc/farine.ini* will be use.
To override it, set the environment variable **FARINE_INI**.
The format of the config must be compliant with configparser,
have a [DEFAULT] section and one by service.
Example:
| [DEFAULT]
| amqp_uri=amqp://user:password@localhost:5672/vhost
| [taskstatus]
| enabled = true
"""
import os
import ConfigParser
CONF_PATH = os.environ.get('FARINE_INI', '/etc/farine.ini')
DEFAULTS = {
'type': 'direct', #Exchange's type : `direct`, `topic`, `broadcast`
'durable': True, #Does the exchange still exist after the AMQP server restart.
'auto_declare': True, #Does the queue auto declare itself.
'delivery_mode': 2, #How the messages are stored in the server. Transient=>1 or Persistent=>2
'retry': True, #Retry sending message or declaring the exchange if the connection is lost.
'retry_policy' : {'max_retries': 5}, #Retry's policy.
'serializer': 'json', #The serializer used to encode the message.
}
|
baguette-io/baguette-messaging | farine/discovery.py | import_module | python | def import_module(module):
try:
__import__('{0}.service'.format(module))
except ImportError:
LOGGER.error('No module/service found. Quit.')
sys.exit(0) | | Given a module `service`, try to import it.
| It will autodiscovers all the entrypoints
| and add them in `ENTRYPOINTS`.
:param module: The module's name to import.
:type module: str
:rtype: None
:raises ImportError: When the service/module to start is not found. | train | https://github.com/baguette-io/baguette-messaging/blob/8d1c4707ea7eace8617fed2d97df2fcc9d0cdee1/farine/discovery.py#L19-L34 | null | #-*- coding:utf-8 -*-
"""
| Module containing everything related to service management:
| Register services to launch, start them.
|TODO: J'en ai vu de la merde, mais de la merde comme ca. CLEANUP.
"""
import importlib
import inspect
import logging
import sys
import gevent.threadpool
LOGGER = logging.getLogger(__name__)
ENTRYPOINTS = [] #List of a tuple: (Entrypoint, callback, args, kwargs)
def import_models(module):
"""
| Given a module `service`, try to import its models module.
:param module: The module's name to import the models.
:type module: str
:rtype: list
:returns: all the models defined.
"""
try:
module = importlib.import_module('{0}.models'.format(module))
except ImportError:
return []
else:
clsmembers = inspect.getmembers(module, lambda member: inspect.isclass(member) and member.__module__ == module.__name__)
return [kls for name, kls in clsmembers]
def start():
"""
| Start all the registered entrypoints
| that have been added to `ENTRYPOINTS`.
:rtype: None
"""
pool = gevent.threadpool.ThreadPool(len(ENTRYPOINTS))
for entrypoint, callback, args, kwargs in ENTRYPOINTS:
cname = callback.__name__
#1. Retrieve the class which owns the callback
for name, klass in inspect.getmembers(sys.modules[callback.__module__], inspect.isclass):
if hasattr(klass, cname):
service_name = name.lower()
break
#2.Start the entrypoint
callback = getattr(klass(), cname)
kwargs.update({'service':service_name, 'callback':callback, 'callback_name': cname})
LOGGER.info('Start service %s[%s].', service_name.capitalize(), cname)
obj = entrypoint(*args, **kwargs)
pool.spawn(obj.start, *args, **kwargs)
pool.join()
return True
|
baguette-io/baguette-messaging | farine/discovery.py | import_models | python | def import_models(module):
try:
module = importlib.import_module('{0}.models'.format(module))
except ImportError:
return []
else:
clsmembers = inspect.getmembers(module, lambda member: inspect.isclass(member) and member.__module__ == module.__name__)
return [kls for name, kls in clsmembers] | | Given a module `service`, try to import its models module.
:param module: The module's name to import the models.
:type module: str
:rtype: list
:returns: all the models defined. | train | https://github.com/baguette-io/baguette-messaging/blob/8d1c4707ea7eace8617fed2d97df2fcc9d0cdee1/farine/discovery.py#L36-L51 | null | #-*- coding:utf-8 -*-
"""
| Module containing everything related to service management:
| Register services to launch, start them.
|TODO: J'en ai vu de la merde, mais de la merde comme ca. CLEANUP.
"""
import importlib
import inspect
import logging
import sys
import gevent.threadpool
LOGGER = logging.getLogger(__name__)
ENTRYPOINTS = [] #List of a tuple: (Entrypoint, callback, args, kwargs)
def import_module(module):
"""
| Given a module `service`, try to import it.
| It will autodiscovers all the entrypoints
| and add them in `ENTRYPOINTS`.
:param module: The module's name to import.
:type module: str
:rtype: None
:raises ImportError: When the service/module to start is not found.
"""
try:
__import__('{0}.service'.format(module))
except ImportError:
LOGGER.error('No module/service found. Quit.')
sys.exit(0)
def start():
"""
| Start all the registered entrypoints
| that have been added to `ENTRYPOINTS`.
:rtype: None
"""
pool = gevent.threadpool.ThreadPool(len(ENTRYPOINTS))
for entrypoint, callback, args, kwargs in ENTRYPOINTS:
cname = callback.__name__
#1. Retrieve the class which owns the callback
for name, klass in inspect.getmembers(sys.modules[callback.__module__], inspect.isclass):
if hasattr(klass, cname):
service_name = name.lower()
break
#2.Start the entrypoint
callback = getattr(klass(), cname)
kwargs.update({'service':service_name, 'callback':callback, 'callback_name': cname})
LOGGER.info('Start service %s[%s].', service_name.capitalize(), cname)
obj = entrypoint(*args, **kwargs)
pool.spawn(obj.start, *args, **kwargs)
pool.join()
return True
|
baguette-io/baguette-messaging | farine/discovery.py | start | python | def start():
pool = gevent.threadpool.ThreadPool(len(ENTRYPOINTS))
for entrypoint, callback, args, kwargs in ENTRYPOINTS:
cname = callback.__name__
#1. Retrieve the class which owns the callback
for name, klass in inspect.getmembers(sys.modules[callback.__module__], inspect.isclass):
if hasattr(klass, cname):
service_name = name.lower()
break
#2.Start the entrypoint
callback = getattr(klass(), cname)
kwargs.update({'service':service_name, 'callback':callback, 'callback_name': cname})
LOGGER.info('Start service %s[%s].', service_name.capitalize(), cname)
obj = entrypoint(*args, **kwargs)
pool.spawn(obj.start, *args, **kwargs)
pool.join()
return True | | Start all the registered entrypoints
| that have been added to `ENTRYPOINTS`.
:rtype: None | train | https://github.com/baguette-io/baguette-messaging/blob/8d1c4707ea7eace8617fed2d97df2fcc9d0cdee1/farine/discovery.py#L53-L75 | null | #-*- coding:utf-8 -*-
"""
| Module containing everything related to service management:
| Register services to launch, start them.
|TODO: J'en ai vu de la merde, mais de la merde comme ca. CLEANUP.
"""
import importlib
import inspect
import logging
import sys
import gevent.threadpool
LOGGER = logging.getLogger(__name__)
ENTRYPOINTS = [] #List of a tuple: (Entrypoint, callback, args, kwargs)
def import_module(module):
"""
| Given a module `service`, try to import it.
| It will autodiscovers all the entrypoints
| and add them in `ENTRYPOINTS`.
:param module: The module's name to import.
:type module: str
:rtype: None
:raises ImportError: When the service/module to start is not found.
"""
try:
__import__('{0}.service'.format(module))
except ImportError:
LOGGER.error('No module/service found. Quit.')
sys.exit(0)
def import_models(module):
"""
| Given a module `service`, try to import its models module.
:param module: The module's name to import the models.
:type module: str
:rtype: list
:returns: all the models defined.
"""
try:
module = importlib.import_module('{0}.models'.format(module))
except ImportError:
return []
else:
clsmembers = inspect.getmembers(module, lambda member: inspect.isclass(member) and member.__module__ == module.__name__)
return [kls for name, kls in clsmembers]
|
baguette-io/baguette-messaging | farine/mixins.py | EntryPointMixin.main_callback | python | def main_callback(self, *args, **kwargs):
if not self.callback:
raise NotImplementedError('Entrypoints must declare `callback`')
if not self.settings:
raise NotImplementedError('Entrypoints must declare `settings`')
self.callback.im_self.db = None
#1. Start all the middlewares
with self.debug(*args, **kwargs):
with self.database():
#2. `Real` callback
result = self.callback(*args, **kwargs)#pylint: disable=not-callable
return result | Main callback called when an event is received from an entry point.
:returns: The entry point's callback.
:rtype: function
:raises NotImplementedError: When the entrypoint doesn't have the required attributes. | train | https://github.com/baguette-io/baguette-messaging/blob/8d1c4707ea7eace8617fed2d97df2fcc9d0cdee1/farine/mixins.py#L20-L40 | [
"def debug(self):\n \"\"\"\n | The debug implementation for the entry point,\n | Each entry point must have it's own debug logic.\n \"\"\"\n"
] | class EntryPointMixin(object):
"""
| Entry point Mixin.
| All services that are listening for an event(like a consumer, an http server, ...)
| are called `Entry Point` and then must inherit from it.
| They will implement debug mode, monitoring, etc.
"""
__metaclass__ = abc.ABCMeta
callback = None
@abc.abstractmethod
def debug(self):
"""
| The debug implementation for the entry point,
| Each entry point must have it's own debug logic.
"""
@contextlib.contextmanager
def database(self):
"""
Before the callback is called, initialize the database if needed.
:rtype: None
"""
#1. Initialize
self.callback.im_self.db = sql.setup(self.settings)
if self.callback.im_self.db:
module = '.'.join(self.callback.im_self.__module__.split('.')[:-1])
sql.init(module, self.callback.im_self.db)
self.callback.im_self.db.connect()
yield
#2. Cleanup
if self.callback.im_self.db:
self.callback.im_self.db.close()
|
baguette-io/baguette-messaging | farine/mixins.py | EntryPointMixin.database | python | def database(self):
#1. Initialize
self.callback.im_self.db = sql.setup(self.settings)
if self.callback.im_self.db:
module = '.'.join(self.callback.im_self.__module__.split('.')[:-1])
sql.init(module, self.callback.im_self.db)
self.callback.im_self.db.connect()
yield
#2. Cleanup
if self.callback.im_self.db:
self.callback.im_self.db.close() | Before the callback is called, initialize the database if needed.
:rtype: None | train | https://github.com/baguette-io/baguette-messaging/blob/8d1c4707ea7eace8617fed2d97df2fcc9d0cdee1/farine/mixins.py#L50-L64 | [
"def setup(settings):\n \"\"\"\n Setup the database connection.\n \"\"\"\n connector = settings.get('db_connector')\n if connector == 'postgres':\n from playhouse.pool import PooledPostgresqlExtDatabase\n return PooledPostgresqlExtDatabase(settings['db_name'],\n user=settings['db_user'],\n password=settings['db_password'],\n host=settings['db_host'],\n port=settings.get('db_port'),\n max_connections=settings.get('db_max_conn'),\n stale_timeout=settings.get('db_stale_timeout'),\n timeout=settings.get('db_timeout'),\n register_hstore=False)\n",
"def init(module, db):\n \"\"\"\n Initialize the models.\n \"\"\"\n for model in farine.discovery.import_models(module):\n model._meta.database = db\n"
] | class EntryPointMixin(object):
"""
| Entry point Mixin.
| All services that are listening for an event(like a consumer, an http server, ...)
| are called `Entry Point` and then must inherit from it.
| They will implement debug mode, monitoring, etc.
"""
__metaclass__ = abc.ABCMeta
callback = None
def main_callback(self, *args, **kwargs):
"""
Main callback called when an event is received from an entry point.
:returns: The entry point's callback.
:rtype: function
:raises NotImplementedError: When the entrypoint doesn't have the required attributes.
"""
if not self.callback:
raise NotImplementedError('Entrypoints must declare `callback`')
if not self.settings:
raise NotImplementedError('Entrypoints must declare `settings`')
self.callback.im_self.db = None
#1. Start all the middlewares
with self.debug(*args, **kwargs):
with self.database():
#2. `Real` callback
result = self.callback(*args, **kwargs)#pylint: disable=not-callable
return result
@abc.abstractmethod
def debug(self):
"""
| The debug implementation for the entry point,
| Each entry point must have it's own debug logic.
"""
@contextlib.contextmanager
|
baguette-io/baguette-messaging | farine/rpc/client.py | Client.callback | python | def callback(self, result, message):
self.results.put(result)
message.ack() | Method called automatically when a message is received.
This will be executed before exiting self.start(). | train | https://github.com/baguette-io/baguette-messaging/blob/8d1c4707ea7eace8617fed2d97df2fcc9d0cdee1/farine/rpc/client.py#L31-L37 | null | class Client(farine.amqp.Consumer):
"""
RPC Client which is
an AMQP publisher then consumer.
"""
prefetch_count = 1
auto_generated = True
#TODO: delete the queue when the rpc call is done.
#auto_delete = True
#exclusive = True
def __init__(self, service, timeout=None):
super(Client, self).__init__(service, service, service=service)
self.timeout = timeout
self.remote = None
self.running = True
self.results = Queue.Queue()
def __wrap_rpc__(self, *args, **kwargs):
"""
| Wrapper for our RPC method:
| if it's a non streaming call, then we return the next and only one element of the generator.
| Otherwise we returns the generator.
"""
stream = kwargs.pop('__stream__', False)
result = self.__rpc__(*args, **kwargs)
if stream:
return result
return next(result)
def __rpc__(self, *args, **kwargs):
"""
RPC call logic.
There are two types of rpc calls:
Streaming and basic.
We don't set the auto_delete flag to the queue because of the streaming call.
"""
self.correlation_id = uuid.uuid4().hex
message = {'args': args,
'kwargs': kwargs
}
publish = farine.amqp.Publisher(self.service, '{}__{}'.format(self.service, self.remote))
publish.send(message,
correlation_id=self.correlation_id,
reply_to=self.queue.name,
declare=[self.queue],
)
while self.running:
try:
self.start(forever=False, timeout=self.timeout)
except:#pylint:disable=bare-except
raise exceptions.RPCError(traceback.format_exc())
# Iterate over the queue
while not self.results.empty():
result = self.results.get()
if result.get('__except__'):
raise exceptions.RPCError(result['__except__'])
elif result.get('__end__'):
self.running = False
elif result.get('body'):
yield result['body']
def __getattr__(self, method, *args, **kwargs):
self.remote = method
return self.__wrap_rpc__
|
baguette-io/baguette-messaging | farine/connectors/sql/entrypoints.py | migrate | python | def migrate(module=None):
if not module:
#Parsing
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--service', type=str, help='Migrate the module', required=True,
dest='module')
args = parser.parse_args()
module = args.module
#1. Load settings
farine.settings.load()
#2. Load the module
models = farine.discovery.import_models(module)
#3. Get the connection
db = farine.connectors.sql.setup(getattr(farine.settings, module))
#4. Create tables
for model in models:
model._meta.database = db
model.create_table(fail_silently=True) | Entrypoint to migrate the schema.
For the moment only create tables. | train | https://github.com/baguette-io/baguette-messaging/blob/8d1c4707ea7eace8617fed2d97df2fcc9d0cdee1/farine/connectors/sql/entrypoints.py#L13-L34 | [
"def setup(settings):\n \"\"\"\n Setup the database connection.\n \"\"\"\n connector = settings.get('db_connector')\n if connector == 'postgres':\n from playhouse.pool import PooledPostgresqlExtDatabase\n return PooledPostgresqlExtDatabase(settings['db_name'],\n user=settings['db_user'],\n password=settings['db_password'],\n host=settings['db_host'],\n port=settings.get('db_port'),\n max_connections=settings.get('db_max_conn'),\n stale_timeout=settings.get('db_stale_timeout'),\n timeout=settings.get('db_timeout'),\n register_hstore=False)\n",
"def import_models(module):\n \"\"\"\n | Given a module `service`, try to import its models module.\n\n :param module: The module's name to import the models.\n :type module: str\n :rtype: list\n :returns: all the models defined.\n \"\"\"\n try:\n module = importlib.import_module('{0}.models'.format(module))\n except ImportError:\n return []\n else:\n clsmembers = inspect.getmembers(module, lambda member: inspect.isclass(member) and member.__module__ == module.__name__)\n return [kls for name, kls in clsmembers]\n",
"def load():\n \"\"\"\n | Load the configuration file.\n | Add dynamically configuration to the module.\n\n :rtype: None\n \"\"\"\n config = ConfigParser.RawConfigParser(DEFAULTS)\n config.readfp(open(CONF_PATH))\n for section in config.sections():\n globals()[section] = {}\n for key, val in config.items(section):\n globals()[section][key] = val\n"
] | #-*- coding:utf-8 -*-
"""
Entrypoint of the sql connectors.
"""
import argparse
import farine.connectors.sql
import farine.discovery
import farine.settings
import farine.log
farine.log.setup_logging(__name__)
|
crypto101/arthur | arthur/ui.py | _unhandledInput | python | def _unhandledInput(event, workbench, launcher):
if event == "ctrl w":
raise urwid.ExitMainLoop()
elif event == "esc":
workbench.clear()
workbench.display(launcher)
return True | Handles input events that weren't handled anywhere else. | train | https://github.com/crypto101/arthur/blob/c32e693fb5af17eac010e3b20f7653ed6e11eb6a/arthur/ui.py#L90-L99 | [
"def display(self, tool):\n self.tools.append(tool)\n",
"def clear(self):\n del self.tools[:]\n"
] | """
Game user interface.
"""
import urwid
from arthur.util import MultiDeferred
from zope import interface
DEFAULT_PALETTE = (
('header', 'black', 'dark green'),
('foreground', 'dark green', 'black'),
('background', 'dark gray', 'black'),
('alert', 'yellow', 'dark red')
)
BACKGROUND = urwid.AttrMap(urwid.SolidFill(u"\N{LIGHT SHADE}"), "background")
DIVIDER = urwid.Divider(u'\N{UPPER ONE EIGHTH BLOCK}')
class Workbench(object):
"""
A workbench, consisting of a top status bar and a background.
"""
def __init__(self):
self.header = Header()
self.widget = urwid.Frame(header=self.header.widget, body=BACKGROUND)
self._tools = []
def display(self, tool):
"""Displays the given tool above the current layer, and sets the
title to its name.
"""
self._tools.append(tool)
self._justDisplay(tool)
def _justDisplay(self, tool):
"""
Displays the given tool. Does not register it in the tools list.
"""
self.header.title.set_text(tool.name)
body, _options = self.widget.contents["body"]
overlay = urwid.Overlay(tool.widget, body, *tool.position)
self._surface = urwid.AttrMap(overlay, "foreground")
self.widget.contents["body"] = self._surface, None
def undisplay(self):
"""Undisplays the top tool.
This actually forces a complete re-render.
"""
self._tools.pop()
self._justClear()
for tool in self._tools:
self._justDisplay(tool)
def clear(self):
"""
Clears the workbench completely.
"""
self._tools = []
self._justClear()
def _justClear(self):
self.header.title.set_text(u"")
self.widget.contents["body"] = BACKGROUND, None
class Header(object):
"""
A header. Contains a title and an aside.
"""
def __init__(self):
self.title = urwid.Text(u"", align="left")
self.aside = urwid.Text(u"Press C-w to quit", align="right")
columns = urwid.Columns([self.title, self.aside])
self.widget = urwid.AttrMap(columns, "header")
class ITool(interface.Interface):
"""
A tool, displayable by a workbench.
"""
name = interface.Attribute(
"""
The name of the tool, which will be used in the title.
""")
widget = interface.Attribute(
"""
The widget that will be displayed on the workbench.
""")
position = interface.Attribute(
"""
The position of the tool's widget on the workbench.
""")
@interface.implementer(ITool)
class Launcher(object):
"""The launcher.
The launcher is a tool that launches other tools. Since it has to
display other tools, it has a reference to the workbench.
"""
name = u"Launcher"
position = "center", 30, "middle", 10
def __init__(self, workbench, tools):
self.workbench = workbench
body = [urwid.Text(u"Select a tool to launch"), DIVIDER]
for tool in tools:
button = urwid.Button(tool.name)
urwid.connect_signal(button, 'click', self._launch, tool)
body.append(urwid.AttrMap(button, "foreground", focus_map="header"))
self.menu = urwid.ListBox(urwid.SimpleFocusListWalker(body))
self.widget = urwid.LineBox(self.menu)
def _launch(self, _button, tool):
"""Button callback to launch a tool.
Tells the workbench to display the given tool.
"""
self.workbench.display(tool)
@interface.implementer(ITool)
class _PopUp(object):
"""
A generic pop-up.
"""
position = "center", 50, "middle", 7
def __init__(self, name):
self.name = name
widgets = [urwid.Text(name), DIVIDER] + self._makeExtraWidgets()
self.listBox = urwid.ListBox(urwid.SimpleListWalker(widgets))
self.widget = urwid.LineBox(self.listBox)
def _makeExtraWidgets(self):
return []
class _Splash(_PopUp):
"""
A splash screen: like a notification, except you can't dismiss it.
"""
def __init__(self, name, text):
self.text = text
_PopUp.__init__(self, name)
def _makeExtraWidgets(self):
"""Makes a text widget.
"""
self.textWidget = urwid.Text(self.text)
return [self.textWidget]
class _ButtonPopUp(_PopUp):
"""A pop up with one or more buttons, and support for notification
when they've been clicked.
"""
def __init__(self, name):
_PopUp.__init__(self, name)
self._result = MultiDeferred()
def _makeExtraWidgets(self):
"""Makes the extra widgets.
This defers to the ``make(TextWidgets|Buttons)`` methods; so
they can be overridden separately.
"""
return self._makeTextWidgets() + self._makeButtons()
def _makeTextWidgets(self):
"""Makes (optional) text widgets.
Override this in a subclass.
"""
return []
def _makeButtons(self):
"""Makes buttons and wires them up.
"""
self.button = button = urwid.Button(u"OK")
urwid.connect_signal(button, "click", self._completed)
return [self.button]
def notifyCompleted(self):
"""Request to be notified when this prompt is completed.
"""
return self._result.tee()
def _completed(self, _button=None):
"""Call the completion deferreds that have been handed out.
"""
self._result.callback(None)
class _Notification(_ButtonPopUp):
"""A generic notification, which can be clicked away.
"""
def __init__(self, name, text):
self.text = text
_ButtonPopUp.__init__(self, name)
def _makeTextWidgets(self):
"""Makes a text widget.
"""
self.textWidget = urwid.Text(self.text)
return [self.textWidget]
def notify(workbench, name, text):
"""Runs a notification.
"""
return _runPopUp(workbench, _Notification(name, text))
class _Alert(_Notification):
"""A notification in a scary-looking color.
"""
def __init__(self, *args, **kwargs):
_Notification.__init__(self, *args, **kwargs)
self.originalWidget = self.widget
self.widget = urwid.AttrMap(self.originalWidget, "alert")
def alert(workbench, name, text):
"""Runs an alert.
"""
return _runPopUp(workbench, _Alert(name, text))
class _Prompt(_ButtonPopUp):
"""
A generic prompt for a single string value.
"""
position = "center", 40, "middle", 6
def __init__(self, name, promptText):
self.promptText = promptText
_ButtonPopUp.__init__(self, name)
def _makeTextWidgets(self):
"""Makes an editable prompt widget.
"""
self.prompt = urwid.Edit(self.promptText, multiline=False)
return [self.prompt]
def _completed(self, _button=None):
"""The prompt was completed. Fire all waiting deferreds with the
prompt's edit text.
"""
self._result.callback(self.prompt.edit_text)
def prompt(workbench, name, promptText):
"""Runs a prompt.
"""
return _runPopUp(workbench, _Prompt(name, promptText))
def _runPopUp(workbench, popUp):
"""Displays the pop-up on the workbench and gets a completion
notification deferred. When that fires, undisplay the pop-up and
return the result of the notification deferred verbatim.
"""
workbench.display(popUp)
d = popUp.notifyCompleted()
d.addCallback(_popUpCompleted, workbench)
return d
def _popUpCompleted(result, workbench):
"""The popUp was completed; undisplay it and return the result.
"""
workbench.undisplay()
return result
|
crypto101/arthur | arthur/ui.py | _runPopUp | python | def _runPopUp(workbench, popUp):
workbench.display(popUp)
d = popUp.notifyCompleted()
d.addCallback(_popUpCompleted, workbench)
return d | Displays the pop-up on the workbench and gets a completion
notification deferred. When that fires, undisplay the pop-up and
return the result of the notification deferred verbatim. | train | https://github.com/crypto101/arthur/blob/c32e693fb5af17eac010e3b20f7653ed6e11eb6a/arthur/ui.py#L330-L340 | [
"def notifyCompleted(self):\n \"\"\"Request to be notified when this prompt is completed.\n\n \"\"\"\n return self._result.tee()\n"
] | """
Game user interface.
"""
import urwid
from arthur.util import MultiDeferred
from zope import interface
DEFAULT_PALETTE = (
('header', 'black', 'dark green'),
('foreground', 'dark green', 'black'),
('background', 'dark gray', 'black'),
('alert', 'yellow', 'dark red')
)
BACKGROUND = urwid.AttrMap(urwid.SolidFill(u"\N{LIGHT SHADE}"), "background")
DIVIDER = urwid.Divider(u'\N{UPPER ONE EIGHTH BLOCK}')
class Workbench(object):
"""
A workbench, consisting of a top status bar and a background.
"""
def __init__(self):
self.header = Header()
self.widget = urwid.Frame(header=self.header.widget, body=BACKGROUND)
self._tools = []
def display(self, tool):
"""Displays the given tool above the current layer, and sets the
title to its name.
"""
self._tools.append(tool)
self._justDisplay(tool)
def _justDisplay(self, tool):
"""
Displays the given tool. Does not register it in the tools list.
"""
self.header.title.set_text(tool.name)
body, _options = self.widget.contents["body"]
overlay = urwid.Overlay(tool.widget, body, *tool.position)
self._surface = urwid.AttrMap(overlay, "foreground")
self.widget.contents["body"] = self._surface, None
def undisplay(self):
"""Undisplays the top tool.
This actually forces a complete re-render.
"""
self._tools.pop()
self._justClear()
for tool in self._tools:
self._justDisplay(tool)
def clear(self):
"""
Clears the workbench completely.
"""
self._tools = []
self._justClear()
def _justClear(self):
self.header.title.set_text(u"")
self.widget.contents["body"] = BACKGROUND, None
class Header(object):
"""
A header. Contains a title and an aside.
"""
def __init__(self):
self.title = urwid.Text(u"", align="left")
self.aside = urwid.Text(u"Press C-w to quit", align="right")
columns = urwid.Columns([self.title, self.aside])
self.widget = urwid.AttrMap(columns, "header")
def _unhandledInput(event, workbench, launcher):
"""Handles input events that weren't handled anywhere else.
"""
if event == "ctrl w":
raise urwid.ExitMainLoop()
elif event == "esc":
workbench.clear()
workbench.display(launcher)
return True
class ITool(interface.Interface):
"""
A tool, displayable by a workbench.
"""
name = interface.Attribute(
"""
The name of the tool, which will be used in the title.
""")
widget = interface.Attribute(
"""
The widget that will be displayed on the workbench.
""")
position = interface.Attribute(
"""
The position of the tool's widget on the workbench.
""")
@interface.implementer(ITool)
class Launcher(object):
"""The launcher.
The launcher is a tool that launches other tools. Since it has to
display other tools, it has a reference to the workbench.
"""
name = u"Launcher"
position = "center", 30, "middle", 10
def __init__(self, workbench, tools):
self.workbench = workbench
body = [urwid.Text(u"Select a tool to launch"), DIVIDER]
for tool in tools:
button = urwid.Button(tool.name)
urwid.connect_signal(button, 'click', self._launch, tool)
body.append(urwid.AttrMap(button, "foreground", focus_map="header"))
self.menu = urwid.ListBox(urwid.SimpleFocusListWalker(body))
self.widget = urwid.LineBox(self.menu)
def _launch(self, _button, tool):
"""Button callback to launch a tool.
Tells the workbench to display the given tool.
"""
self.workbench.display(tool)
@interface.implementer(ITool)
class _PopUp(object):
"""
A generic pop-up.
"""
position = "center", 50, "middle", 7
def __init__(self, name):
self.name = name
widgets = [urwid.Text(name), DIVIDER] + self._makeExtraWidgets()
self.listBox = urwid.ListBox(urwid.SimpleListWalker(widgets))
self.widget = urwid.LineBox(self.listBox)
def _makeExtraWidgets(self):
return []
class _Splash(_PopUp):
"""
A splash screen: like a notification, except you can't dismiss it.
"""
def __init__(self, name, text):
self.text = text
_PopUp.__init__(self, name)
def _makeExtraWidgets(self):
"""Makes a text widget.
"""
self.textWidget = urwid.Text(self.text)
return [self.textWidget]
class _ButtonPopUp(_PopUp):
"""A pop up with one or more buttons, and support for notification
when they've been clicked.
"""
def __init__(self, name):
_PopUp.__init__(self, name)
self._result = MultiDeferred()
def _makeExtraWidgets(self):
"""Makes the extra widgets.
This defers to the ``make(TextWidgets|Buttons)`` methods; so
they can be overridden separately.
"""
return self._makeTextWidgets() + self._makeButtons()
def _makeTextWidgets(self):
"""Makes (optional) text widgets.
Override this in a subclass.
"""
return []
def _makeButtons(self):
"""Makes buttons and wires them up.
"""
self.button = button = urwid.Button(u"OK")
urwid.connect_signal(button, "click", self._completed)
return [self.button]
def notifyCompleted(self):
"""Request to be notified when this prompt is completed.
"""
return self._result.tee()
def _completed(self, _button=None):
"""Call the completion deferreds that have been handed out.
"""
self._result.callback(None)
class _Notification(_ButtonPopUp):
"""A generic notification, which can be clicked away.
"""
def __init__(self, name, text):
self.text = text
_ButtonPopUp.__init__(self, name)
def _makeTextWidgets(self):
"""Makes a text widget.
"""
self.textWidget = urwid.Text(self.text)
return [self.textWidget]
def notify(workbench, name, text):
"""Runs a notification.
"""
return _runPopUp(workbench, _Notification(name, text))
class _Alert(_Notification):
"""A notification in a scary-looking color.
"""
def __init__(self, *args, **kwargs):
_Notification.__init__(self, *args, **kwargs)
self.originalWidget = self.widget
self.widget = urwid.AttrMap(self.originalWidget, "alert")
def alert(workbench, name, text):
"""Runs an alert.
"""
return _runPopUp(workbench, _Alert(name, text))
class _Prompt(_ButtonPopUp):
"""
A generic prompt for a single string value.
"""
position = "center", 40, "middle", 6
def __init__(self, name, promptText):
self.promptText = promptText
_ButtonPopUp.__init__(self, name)
def _makeTextWidgets(self):
"""Makes an editable prompt widget.
"""
self.prompt = urwid.Edit(self.promptText, multiline=False)
return [self.prompt]
def _completed(self, _button=None):
"""The prompt was completed. Fire all waiting deferreds with the
prompt's edit text.
"""
self._result.callback(self.prompt.edit_text)
def prompt(workbench, name, promptText):
"""Runs a prompt.
"""
return _runPopUp(workbench, _Prompt(name, promptText))
def _popUpCompleted(result, workbench):
"""The popUp was completed; undisplay it and return the result.
"""
workbench.undisplay()
return result
|
crypto101/arthur | arthur/ui.py | Workbench.display | python | def display(self, tool):
self._tools.append(tool)
self._justDisplay(tool) | Displays the given tool above the current layer, and sets the
title to its name. | train | https://github.com/crypto101/arthur/blob/c32e693fb5af17eac010e3b20f7653ed6e11eb6a/arthur/ui.py#L30-L36 | [
"def _justDisplay(self, tool):\n \"\"\"\n Displays the given tool. Does not register it in the tools list.\n \"\"\"\n self.header.title.set_text(tool.name)\n\n body, _options = self.widget.contents[\"body\"]\n overlay = urwid.Overlay(tool.widget, body, *tool.position)\n self._surface = urwid.AttrMap(overlay, \"foreground\")\n self.widget.contents[\"body\"] = self._surface, None\n"
] | class Workbench(object):
"""
A workbench, consisting of a top status bar and a background.
"""
def __init__(self):
self.header = Header()
self.widget = urwid.Frame(header=self.header.widget, body=BACKGROUND)
self._tools = []
def _justDisplay(self, tool):
"""
Displays the given tool. Does not register it in the tools list.
"""
self.header.title.set_text(tool.name)
body, _options = self.widget.contents["body"]
overlay = urwid.Overlay(tool.widget, body, *tool.position)
self._surface = urwid.AttrMap(overlay, "foreground")
self.widget.contents["body"] = self._surface, None
def undisplay(self):
"""Undisplays the top tool.
This actually forces a complete re-render.
"""
self._tools.pop()
self._justClear()
for tool in self._tools:
self._justDisplay(tool)
def clear(self):
"""
Clears the workbench completely.
"""
self._tools = []
self._justClear()
def _justClear(self):
self.header.title.set_text(u"")
self.widget.contents["body"] = BACKGROUND, None
|
crypto101/arthur | arthur/ui.py | Workbench._justDisplay | python | def _justDisplay(self, tool):
self.header.title.set_text(tool.name)
body, _options = self.widget.contents["body"]
overlay = urwid.Overlay(tool.widget, body, *tool.position)
self._surface = urwid.AttrMap(overlay, "foreground")
self.widget.contents["body"] = self._surface, None | Displays the given tool. Does not register it in the tools list. | train | https://github.com/crypto101/arthur/blob/c32e693fb5af17eac010e3b20f7653ed6e11eb6a/arthur/ui.py#L39-L48 | null | class Workbench(object):
"""
A workbench, consisting of a top status bar and a background.
"""
def __init__(self):
self.header = Header()
self.widget = urwid.Frame(header=self.header.widget, body=BACKGROUND)
self._tools = []
def display(self, tool):
"""Displays the given tool above the current layer, and sets the
title to its name.
"""
self._tools.append(tool)
self._justDisplay(tool)
def undisplay(self):
"""Undisplays the top tool.
This actually forces a complete re-render.
"""
self._tools.pop()
self._justClear()
for tool in self._tools:
self._justDisplay(tool)
def clear(self):
"""
Clears the workbench completely.
"""
self._tools = []
self._justClear()
def _justClear(self):
self.header.title.set_text(u"")
self.widget.contents["body"] = BACKGROUND, None
|
crypto101/arthur | arthur/ui.py | Workbench.undisplay | python | def undisplay(self):
self._tools.pop()
self._justClear()
for tool in self._tools:
self._justDisplay(tool) | Undisplays the top tool.
This actually forces a complete re-render. | train | https://github.com/crypto101/arthur/blob/c32e693fb5af17eac010e3b20f7653ed6e11eb6a/arthur/ui.py#L51-L59 | [
"def _justDisplay(self, tool):\n \"\"\"\n Displays the given tool. Does not register it in the tools list.\n \"\"\"\n self.header.title.set_text(tool.name)\n\n body, _options = self.widget.contents[\"body\"]\n overlay = urwid.Overlay(tool.widget, body, *tool.position)\n self._surface = urwid.AttrMap(overlay, \"foreground\")\n self.widget.contents[\"body\"] = self._surface, None\n",
"def _justClear(self):\n self.header.title.set_text(u\"\")\n self.widget.contents[\"body\"] = BACKGROUND, None\n"
] | class Workbench(object):
"""
A workbench, consisting of a top status bar and a background.
"""
def __init__(self):
self.header = Header()
self.widget = urwid.Frame(header=self.header.widget, body=BACKGROUND)
self._tools = []
def display(self, tool):
"""Displays the given tool above the current layer, and sets the
title to its name.
"""
self._tools.append(tool)
self._justDisplay(tool)
def _justDisplay(self, tool):
"""
Displays the given tool. Does not register it in the tools list.
"""
self.header.title.set_text(tool.name)
body, _options = self.widget.contents["body"]
overlay = urwid.Overlay(tool.widget, body, *tool.position)
self._surface = urwid.AttrMap(overlay, "foreground")
self.widget.contents["body"] = self._surface, None
def clear(self):
"""
Clears the workbench completely.
"""
self._tools = []
self._justClear()
def _justClear(self):
self.header.title.set_text(u"")
self.widget.contents["body"] = BACKGROUND, None
|
crypto101/arthur | arthur/ui.py | _Splash._makeExtraWidgets | python | def _makeExtraWidgets(self):
self.textWidget = urwid.Text(self.text)
return [self.textWidget] | Makes a text widget. | train | https://github.com/crypto101/arthur/blob/c32e693fb5af17eac010e3b20f7653ed6e11eb6a/arthur/ui.py#L189-L194 | null | class _Splash(_PopUp):
"""
A splash screen: like a notification, except you can't dismiss it.
"""
def __init__(self, name, text):
self.text = text
_PopUp.__init__(self, name)
|
crypto101/arthur | arthur/ui.py | _ButtonPopUp._makeButtons | python | def _makeButtons(self):
self.button = button = urwid.Button(u"OK")
urwid.connect_signal(button, "click", self._completed)
return [self.button] | Makes buttons and wires them up. | train | https://github.com/crypto101/arthur/blob/c32e693fb5af17eac010e3b20f7653ed6e11eb6a/arthur/ui.py#L226-L232 | null | class _ButtonPopUp(_PopUp):
"""A pop up with one or more buttons, and support for notification
when they've been clicked.
"""
def __init__(self, name):
_PopUp.__init__(self, name)
self._result = MultiDeferred()
def _makeExtraWidgets(self):
"""Makes the extra widgets.
This defers to the ``make(TextWidgets|Buttons)`` methods; so
they can be overridden separately.
"""
return self._makeTextWidgets() + self._makeButtons()
def _makeTextWidgets(self):
"""Makes (optional) text widgets.
Override this in a subclass.
"""
return []
def notifyCompleted(self):
"""Request to be notified when this prompt is completed.
"""
return self._result.tee()
def _completed(self, _button=None):
"""Call the completion deferreds that have been handed out.
"""
self._result.callback(None)
|
crypto101/arthur | arthur/ui.py | _Notification._makeTextWidgets | python | def _makeTextWidgets(self):
self.textWidget = urwid.Text(self.text)
return [self.textWidget] | Makes a text widget. | train | https://github.com/crypto101/arthur/blob/c32e693fb5af17eac010e3b20f7653ed6e11eb6a/arthur/ui.py#L259-L264 | null | class _Notification(_ButtonPopUp):
"""A generic notification, which can be clicked away.
"""
def __init__(self, name, text):
self.text = text
_ButtonPopUp.__init__(self, name)
|
crypto101/arthur | arthur/ui.py | _Prompt._makeTextWidgets | python | def _makeTextWidgets(self):
self.prompt = urwid.Edit(self.promptText, multiline=False)
return [self.prompt] | Makes an editable prompt widget. | train | https://github.com/crypto101/arthur/blob/c32e693fb5af17eac010e3b20f7653ed6e11eb6a/arthur/ui.py#L306-L311 | null | class _Prompt(_ButtonPopUp):
"""
A generic prompt for a single string value.
"""
position = "center", 40, "middle", 6
def __init__(self, name, promptText):
self.promptText = promptText
_ButtonPopUp.__init__(self, name)
def _completed(self, _button=None):
"""The prompt was completed. Fire all waiting deferreds with the
prompt's edit text.
"""
self._result.callback(self.prompt.edit_text)
|
crypto101/arthur | arthur/auth.py | connect | python | def connect(workbench):
d = _getContextFactory(getDataPath(), workbench)
d.addCallback(_connectWithContextFactory, workbench)
return d | Connection inititalization routine. | train | https://github.com/crypto101/arthur/blob/c32e693fb5af17eac010e3b20f7653ed6e11eb6a/arthur/auth.py#L15-L21 | [
"def _getContextFactory(path, workbench):\n \"\"\"Get a context factory.\n\n If the client already has a credentials at path, use them.\n Otherwise, generate them at path. Notifications are reported to\n the given workbench.\n\n \"\"\"\n try:\n return succeed(getContextFactory(path))\n except IOError:\n d = prompt(workbench, u\"E-mail entry\", u\"Enter e-mail:\")\n d.addCallback(_makeCredentials, path, workbench)\n d.addCallback(lambda _result: getContextFactory(path))\n return d\n"
] | """
Stuff for connecting to a merlyn server.
"""
from arthur.ui import alert, prompt, _Splash
from arthur.protocol import Factory
from clarent.certificate import makeCredentials, getContextFactory
from clarent.path import getDataPath
from twisted.internet import reactor
from twisted.internet.defer import succeed
from twisted.internet.endpoints import SSL4ClientEndpoint
from twisted.internet.error import ConnectError
def _connectWithContextFactory(ctxFactory, workbench):
"""Connect using the given context factory. Notifications go to the
given workbench.
"""
endpoint = SSL4ClientEndpoint(reactor, "localhost", 4430, ctxFactory)
splash = _Splash(u"Connecting", u"Connecting...")
workbench.display(splash)
d = endpoint.connect(Factory(workbench))
@d.addBoth
def closeSplash(returnValue):
workbench.undisplay()
return returnValue
@d.addErrback
def notifyFailure(f):
f.trap(ConnectError)
d = alert(workbench, u"Couldn't connect", u"Connection failed! "
"Check internet connection, or try again later.\n"
"Error: {!r}".format(f.value))
return d.addCallback(lambda _result: reactor.stop())
return d
def _getContextFactory(path, workbench):
"""Get a context factory.
If the client already has a credentials at path, use them.
Otherwise, generate them at path. Notifications are reported to
the given workbench.
"""
try:
return succeed(getContextFactory(path))
except IOError:
d = prompt(workbench, u"E-mail entry", u"Enter e-mail:")
d.addCallback(_makeCredentials, path, workbench)
d.addCallback(lambda _result: getContextFactory(path))
return d
def _makeCredentials(email, path, workbench):
"""Makes client certs and writes them to disk at path.
This essentially defers to clarent's ``makeCredentials`` function,
except it also shows a nice splash screen.
"""
splash = _Splash(u"SSL credential generation",
u"Generating SSL credentials. (This can take a while.)")
workbench.display(splash)
makeCredentials(path, email)
workbench.undisplay()
|
crypto101/arthur | arthur/auth.py | _connectWithContextFactory | python | def _connectWithContextFactory(ctxFactory, workbench):
endpoint = SSL4ClientEndpoint(reactor, "localhost", 4430, ctxFactory)
splash = _Splash(u"Connecting", u"Connecting...")
workbench.display(splash)
d = endpoint.connect(Factory(workbench))
@d.addBoth
def closeSplash(returnValue):
workbench.undisplay()
return returnValue
@d.addErrback
def notifyFailure(f):
f.trap(ConnectError)
d = alert(workbench, u"Couldn't connect", u"Connection failed! "
"Check internet connection, or try again later.\n"
"Error: {!r}".format(f.value))
return d.addCallback(lambda _result: reactor.stop())
return d | Connect using the given context factory. Notifications go to the
given workbench. | train | https://github.com/crypto101/arthur/blob/c32e693fb5af17eac010e3b20f7653ed6e11eb6a/arthur/auth.py#L24-L49 | null | """
Stuff for connecting to a merlyn server.
"""
from arthur.ui import alert, prompt, _Splash
from arthur.protocol import Factory
from clarent.certificate import makeCredentials, getContextFactory
from clarent.path import getDataPath
from twisted.internet import reactor
from twisted.internet.defer import succeed
from twisted.internet.endpoints import SSL4ClientEndpoint
from twisted.internet.error import ConnectError
def connect(workbench):
"""Connection inititalization routine.
"""
d = _getContextFactory(getDataPath(), workbench)
d.addCallback(_connectWithContextFactory, workbench)
return d
def _getContextFactory(path, workbench):
"""Get a context factory.
If the client already has a credentials at path, use them.
Otherwise, generate them at path. Notifications are reported to
the given workbench.
"""
try:
return succeed(getContextFactory(path))
except IOError:
d = prompt(workbench, u"E-mail entry", u"Enter e-mail:")
d.addCallback(_makeCredentials, path, workbench)
d.addCallback(lambda _result: getContextFactory(path))
return d
def _makeCredentials(email, path, workbench):
"""Makes client certs and writes them to disk at path.
This essentially defers to clarent's ``makeCredentials`` function,
except it also shows a nice splash screen.
"""
splash = _Splash(u"SSL credential generation",
u"Generating SSL credentials. (This can take a while.)")
workbench.display(splash)
makeCredentials(path, email)
workbench.undisplay()
|
crypto101/arthur | arthur/auth.py | _getContextFactory | python | def _getContextFactory(path, workbench):
try:
return succeed(getContextFactory(path))
except IOError:
d = prompt(workbench, u"E-mail entry", u"Enter e-mail:")
d.addCallback(_makeCredentials, path, workbench)
d.addCallback(lambda _result: getContextFactory(path))
return d | Get a context factory.
If the client already has a credentials at path, use them.
Otherwise, generate them at path. Notifications are reported to
the given workbench. | train | https://github.com/crypto101/arthur/blob/c32e693fb5af17eac010e3b20f7653ed6e11eb6a/arthur/auth.py#L52-L66 | [
"def prompt(workbench, name, promptText):\n \"\"\"Runs a prompt.\n\n \"\"\"\n return _runPopUp(workbench, _Prompt(name, promptText))\n"
] | """
Stuff for connecting to a merlyn server.
"""
from arthur.ui import alert, prompt, _Splash
from arthur.protocol import Factory
from clarent.certificate import makeCredentials, getContextFactory
from clarent.path import getDataPath
from twisted.internet import reactor
from twisted.internet.defer import succeed
from twisted.internet.endpoints import SSL4ClientEndpoint
from twisted.internet.error import ConnectError
def connect(workbench):
"""Connection inititalization routine.
"""
d = _getContextFactory(getDataPath(), workbench)
d.addCallback(_connectWithContextFactory, workbench)
return d
def _connectWithContextFactory(ctxFactory, workbench):
"""Connect using the given context factory. Notifications go to the
given workbench.
"""
endpoint = SSL4ClientEndpoint(reactor, "localhost", 4430, ctxFactory)
splash = _Splash(u"Connecting", u"Connecting...")
workbench.display(splash)
d = endpoint.connect(Factory(workbench))
@d.addBoth
def closeSplash(returnValue):
workbench.undisplay()
return returnValue
@d.addErrback
def notifyFailure(f):
f.trap(ConnectError)
d = alert(workbench, u"Couldn't connect", u"Connection failed! "
"Check internet connection, or try again later.\n"
"Error: {!r}".format(f.value))
return d.addCallback(lambda _result: reactor.stop())
return d
def _makeCredentials(email, path, workbench):
"""Makes client certs and writes them to disk at path.
This essentially defers to clarent's ``makeCredentials`` function,
except it also shows a nice splash screen.
"""
splash = _Splash(u"SSL credential generation",
u"Generating SSL credentials. (This can take a while.)")
workbench.display(splash)
makeCredentials(path, email)
workbench.undisplay()
|
crypto101/arthur | arthur/auth.py | _makeCredentials | python | def _makeCredentials(email, path, workbench):
splash = _Splash(u"SSL credential generation",
u"Generating SSL credentials. (This can take a while.)")
workbench.display(splash)
makeCredentials(path, email)
workbench.undisplay() | Makes client certs and writes them to disk at path.
This essentially defers to clarent's ``makeCredentials`` function,
except it also shows a nice splash screen. | train | https://github.com/crypto101/arthur/blob/c32e693fb5af17eac010e3b20f7653ed6e11eb6a/arthur/auth.py#L69-L82 | null | """
Stuff for connecting to a merlyn server.
"""
from arthur.ui import alert, prompt, _Splash
from arthur.protocol import Factory
from clarent.certificate import makeCredentials, getContextFactory
from clarent.path import getDataPath
from twisted.internet import reactor
from twisted.internet.defer import succeed
from twisted.internet.endpoints import SSL4ClientEndpoint
from twisted.internet.error import ConnectError
def connect(workbench):
"""Connection inititalization routine.
"""
d = _getContextFactory(getDataPath(), workbench)
d.addCallback(_connectWithContextFactory, workbench)
return d
def _connectWithContextFactory(ctxFactory, workbench):
"""Connect using the given context factory. Notifications go to the
given workbench.
"""
endpoint = SSL4ClientEndpoint(reactor, "localhost", 4430, ctxFactory)
splash = _Splash(u"Connecting", u"Connecting...")
workbench.display(splash)
d = endpoint.connect(Factory(workbench))
@d.addBoth
def closeSplash(returnValue):
workbench.undisplay()
return returnValue
@d.addErrback
def notifyFailure(f):
f.trap(ConnectError)
d = alert(workbench, u"Couldn't connect", u"Connection failed! "
"Check internet connection, or try again later.\n"
"Error: {!r}".format(f.value))
return d.addCallback(lambda _result: reactor.stop())
return d
def _getContextFactory(path, workbench):
"""Get a context factory.
If the client already has a credentials at path, use them.
Otherwise, generate them at path. Notifications are reported to
the given workbench.
"""
try:
return succeed(getContextFactory(path))
except IOError:
d = prompt(workbench, u"E-mail entry", u"Enter e-mail:")
d.addCallback(_makeCredentials, path, workbench)
d.addCallback(lambda _result: getContextFactory(path))
return d
|
crypto101/arthur | arthur/exercises.py | ExercisesLocator.notifySolved | python | def notifySolved(self, identifier, title):
notify(self.workbench, u"Congratulations", u"Congratulations! You "
"have completed the '{title}' exercise.".format(title=title))
return {} | Notifies the user that a particular exercise has been solved. | train | https://github.com/crypto101/arthur/blob/c32e693fb5af17eac010e3b20f7653ed6e11eb6a/arthur/exercises.py#L29-L35 | [
"def notify(workbench, name, text):\n \"\"\"Runs a notification.\n\n \"\"\"\n return _runPopUp(workbench, _Notification(name, text))\n"
] | class ExercisesLocator(amp.CommandLocator):
def __init__(self, protocol):
self.protocol = protocol
@property
def workbench(self):
return self.protocol.factory.workbench
@NotifySolved.responder
|
crypto101/arthur | arthur/util.py | MultiDeferred.tee | python | def tee(self):
if self._result is not _NO_RESULT:
if not self._isFailure:
return succeed(self._result)
else:
return fail(self._result)
d = Deferred()
self._deferreds.append(d)
return d | Produces a new deferred and returns it. If this C{MultiDeferred}
has not been fired (callbacked or errbacked) yet, the deferred
will not have been fired yet either, but will be fired when
and if this C{MultiDeferred} gets fired in the future. If this
C{MultiDeferred} has been fired, returns a deferred
synchronously fired with the same result.
@return: A deferred that will fire with whatever this object
is fired with.
@rtype: L{Deferred} | train | https://github.com/crypto101/arthur/blob/c32e693fb5af17eac010e3b20f7653ed6e11eb6a/arthur/util.py#L19-L40 | null | class MultiDeferred(object):
"""An object that produces other deferreds. When this object is
callbacked or errbacked, those deferreds are callbacked or
errbacked with the same result or failure.
This class has been submitted to Twisted, see tm.tl/6365.
"""
def __init__(self):
self._deferreds = []
self._result = _NO_RESULT
self._isFailure = None
def callback(self, result):
"""
Callbacks the deferreds previously produced by this object.
@param result: The object which will be passed to the
C{callback} method of all C{Deferred}s previously produced by
this object's C{tee} method.
@raise AlreadyCalledError: If L{callback} or L{errback} has
already been called on this object.
"""
self._setResult(result)
self._isFailure = False
for d in self._deferreds:
d.callback(result)
def errback(self, failure):
"""
Errbacks the deferreds previously produced by this object.
@param failure: The object which will be passed to the
C{errback} method of all C{Deferred}s previously produced by
this object's C{tee} method.
@raise AlreadyCalledError: If L{callback} or L{errback} has
already been called on this object.
"""
self._setResult(failure)
self._isFailure = True
for d in self._deferreds:
d.errback(failure)
def _setResult(self, result):
"""
Sets the result. If the result is already set, raises
C{AlreadyCalledError}.
@raise AlreadyCalledError: The result was already set.
@return: C{None}, if the result was successfully set.
"""
if self._result is not _NO_RESULT:
raise AlreadyCalledError()
self._result = result
|
crypto101/arthur | arthur/util.py | MultiDeferred.callback | python | def callback(self, result):
self._setResult(result)
self._isFailure = False
for d in self._deferreds:
d.callback(result) | Callbacks the deferreds previously produced by this object.
@param result: The object which will be passed to the
C{callback} method of all C{Deferred}s previously produced by
this object's C{tee} method.
@raise AlreadyCalledError: If L{callback} or L{errback} has
already been called on this object. | train | https://github.com/crypto101/arthur/blob/c32e693fb5af17eac010e3b20f7653ed6e11eb6a/arthur/util.py#L43-L57 | [
"def _setResult(self, result):\n \"\"\"\n Sets the result. If the result is already set, raises\n C{AlreadyCalledError}.\n\n @raise AlreadyCalledError: The result was already set.\n @return: C{None}, if the result was successfully set.\n \"\"\"\n if self._result is not _NO_RESULT:\n raise AlreadyCalledError()\n\n self._result = result\n"
] | class MultiDeferred(object):
"""An object that produces other deferreds. When this object is
callbacked or errbacked, those deferreds are callbacked or
errbacked with the same result or failure.
This class has been submitted to Twisted, see tm.tl/6365.
"""
def __init__(self):
self._deferreds = []
self._result = _NO_RESULT
self._isFailure = None
def tee(self):
"""
Produces a new deferred and returns it. If this C{MultiDeferred}
has not been fired (callbacked or errbacked) yet, the deferred
will not have been fired yet either, but will be fired when
and if this C{MultiDeferred} gets fired in the future. If this
C{MultiDeferred} has been fired, returns a deferred
synchronously fired with the same result.
@return: A deferred that will fire with whatever this object
is fired with.
@rtype: L{Deferred}
"""
if self._result is not _NO_RESULT:
if not self._isFailure:
return succeed(self._result)
else:
return fail(self._result)
d = Deferred()
self._deferreds.append(d)
return d
def errback(self, failure):
"""
Errbacks the deferreds previously produced by this object.
@param failure: The object which will be passed to the
C{errback} method of all C{Deferred}s previously produced by
this object's C{tee} method.
@raise AlreadyCalledError: If L{callback} or L{errback} has
already been called on this object.
"""
self._setResult(failure)
self._isFailure = True
for d in self._deferreds:
d.errback(failure)
def _setResult(self, result):
"""
Sets the result. If the result is already set, raises
C{AlreadyCalledError}.
@raise AlreadyCalledError: The result was already set.
@return: C{None}, if the result was successfully set.
"""
if self._result is not _NO_RESULT:
raise AlreadyCalledError()
self._result = result
|
crypto101/arthur | arthur/util.py | MultiDeferred.errback | python | def errback(self, failure):
self._setResult(failure)
self._isFailure = True
for d in self._deferreds:
d.errback(failure) | Errbacks the deferreds previously produced by this object.
@param failure: The object which will be passed to the
C{errback} method of all C{Deferred}s previously produced by
this object's C{tee} method.
@raise AlreadyCalledError: If L{callback} or L{errback} has
already been called on this object. | train | https://github.com/crypto101/arthur/blob/c32e693fb5af17eac010e3b20f7653ed6e11eb6a/arthur/util.py#L60-L74 | [
"def _setResult(self, result):\n \"\"\"\n Sets the result. If the result is already set, raises\n C{AlreadyCalledError}.\n\n @raise AlreadyCalledError: The result was already set.\n @return: C{None}, if the result was successfully set.\n \"\"\"\n if self._result is not _NO_RESULT:\n raise AlreadyCalledError()\n\n self._result = result\n"
] | class MultiDeferred(object):
"""An object that produces other deferreds. When this object is
callbacked or errbacked, those deferreds are callbacked or
errbacked with the same result or failure.
This class has been submitted to Twisted, see tm.tl/6365.
"""
def __init__(self):
self._deferreds = []
self._result = _NO_RESULT
self._isFailure = None
def tee(self):
"""
Produces a new deferred and returns it. If this C{MultiDeferred}
has not been fired (callbacked or errbacked) yet, the deferred
will not have been fired yet either, but will be fired when
and if this C{MultiDeferred} gets fired in the future. If this
C{MultiDeferred} has been fired, returns a deferred
synchronously fired with the same result.
@return: A deferred that will fire with whatever this object
is fired with.
@rtype: L{Deferred}
"""
if self._result is not _NO_RESULT:
if not self._isFailure:
return succeed(self._result)
else:
return fail(self._result)
d = Deferred()
self._deferreds.append(d)
return d
def callback(self, result):
"""
Callbacks the deferreds previously produced by this object.
@param result: The object which will be passed to the
C{callback} method of all C{Deferred}s previously produced by
this object's C{tee} method.
@raise AlreadyCalledError: If L{callback} or L{errback} has
already been called on this object.
"""
self._setResult(result)
self._isFailure = False
for d in self._deferreds:
d.callback(result)
def _setResult(self, result):
"""
Sets the result. If the result is already set, raises
C{AlreadyCalledError}.
@raise AlreadyCalledError: The result was already set.
@return: C{None}, if the result was successfully set.
"""
if self._result is not _NO_RESULT:
raise AlreadyCalledError()
self._result = result
|
crypto101/arthur | arthur/util.py | MultiDeferred._setResult | python | def _setResult(self, result):
if self._result is not _NO_RESULT:
raise AlreadyCalledError()
self._result = result | Sets the result. If the result is already set, raises
C{AlreadyCalledError}.
@raise AlreadyCalledError: The result was already set.
@return: C{None}, if the result was successfully set. | train | https://github.com/crypto101/arthur/blob/c32e693fb5af17eac010e3b20f7653ed6e11eb6a/arthur/util.py#L77-L88 | null | class MultiDeferred(object):
"""An object that produces other deferreds. When this object is
callbacked or errbacked, those deferreds are callbacked or
errbacked with the same result or failure.
This class has been submitted to Twisted, see tm.tl/6365.
"""
def __init__(self):
self._deferreds = []
self._result = _NO_RESULT
self._isFailure = None
def tee(self):
"""
Produces a new deferred and returns it. If this C{MultiDeferred}
has not been fired (callbacked or errbacked) yet, the deferred
will not have been fired yet either, but will be fired when
and if this C{MultiDeferred} gets fired in the future. If this
C{MultiDeferred} has been fired, returns a deferred
synchronously fired with the same result.
@return: A deferred that will fire with whatever this object
is fired with.
@rtype: L{Deferred}
"""
if self._result is not _NO_RESULT:
if not self._isFailure:
return succeed(self._result)
else:
return fail(self._result)
d = Deferred()
self._deferreds.append(d)
return d
def callback(self, result):
"""
Callbacks the deferreds previously produced by this object.
@param result: The object which will be passed to the
C{callback} method of all C{Deferred}s previously produced by
this object's C{tee} method.
@raise AlreadyCalledError: If L{callback} or L{errback} has
already been called on this object.
"""
self._setResult(result)
self._isFailure = False
for d in self._deferreds:
d.callback(result)
def errback(self, failure):
"""
Errbacks the deferreds previously produced by this object.
@param failure: The object which will be passed to the
C{errback} method of all C{Deferred}s previously produced by
this object's C{tee} method.
@raise AlreadyCalledError: If L{callback} or L{errback} has
already been called on this object.
"""
self._setResult(failure)
self._isFailure = True
for d in self._deferreds:
d.errback(failure)
|
crypto101/arthur | arthur/run.py | buildWorkbenchWithLauncher | python | def buildWorkbenchWithLauncher():
workbench = ui.Workbench()
tools = [exercises.SearchTool()]
launcher = ui.Launcher(workbench, tools)
workbench.display(launcher)
return workbench, launcher | Builds a workbench.
The workbench has a launcher with all of the default tools. The
launcher will be displayed on the workbench. | train | https://github.com/crypto101/arthur/blob/c32e693fb5af17eac010e3b20f7653ed6e11eb6a/arthur/run.py#L7-L20 | [
"def display(self, tool):\n \"\"\"Displays the given tool above the current layer, and sets the\n title to its name.\n\n \"\"\"\n self._tools.append(tool)\n self._justDisplay(tool)\n"
] | import urwid
from arthur import exercises, ui
from functools import partial
def buildMainLoop(workbench, launcher, **kwargs):
"""Builds a main loop from the given workbench and launcher.
The main loop will have the default pallette, as well as the
default unused key handler. The key handler will have a reference
to the workbench and launcher so that it can clear the screen.
The extra keyword arguments are passed to the main loop.
"""
unhandledInput = partial(ui._unhandledInput,
workbench=workbench,
launcher=launcher)
mainLoop = urwid.MainLoop(widget=workbench.widget,
palette=ui.DEFAULT_PALETTE,
unhandled_input=unhandledInput,
event_loop=urwid.TwistedEventLoop(),
**kwargs)
return mainLoop
|
crypto101/arthur | arthur/run.py | buildMainLoop | python | def buildMainLoop(workbench, launcher, **kwargs):
unhandledInput = partial(ui._unhandledInput,
workbench=workbench,
launcher=launcher)
mainLoop = urwid.MainLoop(widget=workbench.widget,
palette=ui.DEFAULT_PALETTE,
unhandled_input=unhandledInput,
event_loop=urwid.TwistedEventLoop(),
**kwargs)
return mainLoop | Builds a main loop from the given workbench and launcher.
The main loop will have the default pallette, as well as the
default unused key handler. The key handler will have a reference
to the workbench and launcher so that it can clear the screen.
The extra keyword arguments are passed to the main loop. | train | https://github.com/crypto101/arthur/blob/c32e693fb5af17eac010e3b20f7653ed6e11eb6a/arthur/run.py#L23-L40 | null | import urwid
from arthur import exercises, ui
from functools import partial
def buildWorkbenchWithLauncher():
"""Builds a workbench.
The workbench has a launcher with all of the default tools. The
launcher will be displayed on the workbench.
"""
workbench = ui.Workbench()
tools = [exercises.SearchTool()]
launcher = ui.Launcher(workbench, tools)
workbench.display(launcher)
return workbench, launcher
|
dwwkelly/note | note/web.py | check_auth | python | def check_auth(username, password):
with open(os.path.expanduser("~/config/.note.conf")) as fd:
config = json.loads(fd.read())
try:
u = config['server']['login']['username']
p = config['server']['login']['password']
except KeyError:
print "cannot start server"
sys.exit(1)
return username == u and password == p | This function is called to check if a username /
password combination is valid. | train | https://github.com/dwwkelly/note/blob/b41d5fe1e4a3b67b50285168dd58f903bf219e6c/note/web.py#L27-L42 | null | #!/usr/bin/env python
import time
import json
import os
import sys
import site
import pkgutil
import markdown
from flask import Flask
from flask import request
from flask import Response
from flask import render_template
from flask import Markup
from functools import wraps
from mongo_driver import mongoDB
__author__ = "Devin Kelly"
tf = os.path.join(site.getsitepackages()[0], 'note', 'templates')
sf = os.path.join(site.getsitepackages()[0], 'note', 'static')
app = Flask(__name__, template_folder=tf, static_folder=sf)
app.jinja_env.trim_blocks = True
def authenticate():
"""
Sends a 401 response that enables basic auth
"""
return Response('Could not verify your access level for that URL.\n'
'You have to login with proper credentials', 401,
{'WWW-Authenticate': 'Basic realm="Login Required"'})
def requires_auth(f):
"""
"""
with open(os.path.expanduser("~/config/.note.conf")) as fd:
config = json.loads(fd.read())
try:
login = config['server']['login']
if 'username' in login.keys() and 'password' in login.keys():
authOn = True
else:
authOn = False
except KeyError:
authOn = False
if authOn:
@wraps(f)
def decorated(*args, **kwargs):
auth = request.authorization
if not auth or not check_auth(auth.username, auth.password):
return authenticate()
return f(*args, **kwargs)
else:
@wraps(f)
def decorated(*args, **kwargs):
return f(*args, **kwargs)
return decorated
@app.route(r'/search', methods=["GET", "POST"])
# @requires_auth
def search():
db = mongoDB("note")
if request.method == "GET":
pages = {"cmd": {"link": "search", "name": "Search"},
"title": "Search"}
s = render_template('search.html', p=pages)
elif request.method == "POST" and request.form["api"] == "false":
pages = {"cmd": {"link": "search", "name": "Search Result"},
"title": "Search Result"}
term = request.form["term"]
s = request.form['options']
results = db.searchForItem(term, sortBy=s)
for r in results:
text = Markup(markdown.markdown(r['obj']['note']))
r['obj']['note'] = text
s = render_template('searchResult.html',
p=pages,
searchResults=results)
elif request.method == "POST" and request.form["api"] == "true":
term = request.form["term"]
s = json.dumps(db.searchForItem(term))
else:
s = "not valid"
return s
@app.route(r'/newPlace', methods=["GET", "POST"])
# @requires_auth
def newPlace():
db = mongoDB("note")
if request.method == "GET":
pages = {"cmd": {"link": "newPlace", "name": "New Place"},
"title": "New Place"}
s = render_template('newPlace.html', p=pages)
elif request.method == "POST" and request.form["api"] == "false":
pages = {"cmd": {"link": "newPlace", "name": "Place Added"}, "title":
"Place Added"}
loc = request.form["location"]
tags = request.form["tags"]
noteText = request.form["note"]
place = {"location": loc, "note": noteText, "tags": tags}
s = render_template('placeAdded.html', p=pages, placeInfo=place)
elif request.method == "POST" and request.form["api"] == "true":
term = request.form["term"]
s = json.dumps(db.searchForItem(term))
else:
s = "not valid"
return s
@app.route('/delete', methods=["GET", "POST"])
# @requires_auth
def Delete():
db = mongoDB("note")
pages = {"cmd": {"link": "delete", "name": "Delete"}, "title": "Delete"}
if request.method == "GET":
s = render_template('delete.html', p=pages)
elif request.method == "POST" and request.form["api"] == "false":
ID = int(request.form["ID"])
db.deleteItem(ID)
s = render_template('deleted.html', p=pages, itemID=ID)
elif request.method == "POST" and request.form["api"] == "true":
ID = int(request.form["ID"])
result = db.deleteItem(ID)
retVal = {"result": result, "ID": ID}
s = json.dumps(retVal)
else:
s = u"not valid"
return s
@app.route('/note', methods=["GET"])
# @requires_auth
def start():
pages = {"cmd": {"link": "note", "name": "Note"}, "title": "Note"}
s = render_template('note.html', p=pages)
return s
@app.route('/notes', methods=["GET"])
# @requires_auth
def Notes():
# FIXME -- this function needs a template
pages = {"cmd": {"link": "search", "name": "Search Result"},
"title": "Search Result"}
resultsHTML = []
s = render_template('searchResult.html',
p=pages,
searchResults=resultsHTML)
return s
@app.route('/newNote', methods=["GET", "POST"])
# @requires_auth
def NewNote():
db = mongoDB("note")
pages = {"cmd": {"link": "newNote", "name": "New Note"},
"title": "New Note"}
if request.method == "GET":
s = render_template('newNote.html', p=pages)
elif request.method == "POST" and request.form["api"] == "false":
tags = request.form["tags"]
noteText = request.form["note"]
db.addItem("notes", {"note": noteText, "tags": tags})
s = render_template('noteAdded.html',
p=pages,
note=noteText,
tags=tags)
elif request.method == "POST" and request.form["api"] == "true":
noteText = request.form["note"]
tags = request.form["tags"].split(",")
note = {"note": noteText, "tags": tags, "timestamp": time.time()}
db.addItem("notes", {"note": noteText, "tags": tags})
s = json.dumps(note)
return s
@app.route('/newContact', methods=["GET", "POST"])
# @requires_auth
def NewContact():
pages = {"cmd": {"link": "newContact", "name": "New Contact"},
"title": "New Contact"}
if request.method == "GET":
s = render_template('newContact.html', p=pages)
elif request.method == "POST" and request.form["api"] == "false":
name = request.form["name"]
affiliation = request.form["affiliation"]
email = request.form["email"]
work = request.form["work"]
home = request.form["home"]
mobile = request.form["mobile"]
address = request.form["address"]
contactInfo = {"NAME": name,
"WORK PHONE": work,
"AFFILIATION": affiliation,
"MOBILE PHONE": mobile,
"ADDRESS": address,
"EMAIL": email,
"HOME PHONE": home}
db.addItem("contacts", contactInfo)
s = render_template('contactAdded.html', p=pages, contact=contactInfo)
elif request.method == "POST" and request.form["api"] == "true":
contactText = request.form["contactText"]
tags = request.form["tags"].split(",")
contact = {"contactText": contactText,
"tags": tags,
"timestamp": time.time()}
s = json.dumps(contact)
return s
@app.route('/newTodo', methods=["GET", "POST"])
# @requires_auth
def NewTodo():
db = mongoDB("note")
pages = {"cmd": {"link": "newTodo", "name": "New ToDo"},
"title": "New ToDo"}
if request.method == "GET":
s = render_template('newTodo.html', p=pages)
elif request.method == "POST" and request.form["api"] == "false":
todoText = request.form["todoText"]
done = str(request.form['options'])
date = request.form['date']
done = (done == "done")
todoItem = {"todoText": todoText,
"done": done,
"date": time.mktime(time.strptime(date, "%m/%d/%Y"))}
db.addItem("todos", todoItem)
todoItem['done'] = str(todoItem['done'])
todoItem['date'] = str(todoItem['date'])
s = render_template('todoAdded.html', p=pages, todo=todoItem)
elif request.method == "POST" and request.form["api"] == "true":
todoText = request.form["todoText"]
tags = request.form["tags"].split(",")
todo = {"todoText": todoText, "tags": tags, "timestamp": time.time()}
dateStr = time.mktime(time.strptime(date, "%m/%d/%Y"))
db.addItem("todos", {"todoText": todoText,
"done": done,
"date": dateStr})
s = json.dumps(todo)
return s
|
dwwkelly/note | note/util.py | scrubID | python | def scrubID(ID):
try:
if type(ID) == list:
return int(ID[0])
elif type(ID) == str:
return int(ID)
elif type(ID) == int:
return ID
elif type(ID) == unicode:
return int(ID)
except ValueError:
return None | :param ID: An ID that can be of various types, this is very kludgy
:returns: An integer ID | train | https://github.com/dwwkelly/note/blob/b41d5fe1e4a3b67b50285168dd58f903bf219e6c/note/util.py#L29-L46 | null | import subprocess as SP
import os
itemTypes = ["note", "todo", "contact", "place"]
colors = dict()
colors['reset'] = "\033[0m" # reset
colors['hicolor'] = "\033[1m" # hicolor
colors['underline'] = "\033[4m" # underline
colors['invert'] = "\033[7m" # invert foreground and background
colors['foreground black'] = "\033[30m"
colors['foreground red'] = "\033[31m"
colors['foreground green'] = "\033[32m"
colors['foreground yellow'] = "\033[33m"
colors['foreground blue'] = "\033[34m"
colors['foreground magenta'] = "\033[35m"
colors['foreground cyan'] = "\033[36m"
colors['foreground white'] = "\033[37m"
colors['background black'] = "\033[40m"
colors['background red'] = "\033[41m"
colors['background green'] = "\033[42m"
colors['background yellow'] = "\033[43m"
colors['background blue'] = "\033[44m"
colors['background magenta'] = "\033[45m"
colors['background cyan'] = "\033[46m"
colors['background white'] = "\033[47m"
def which(bin_name):
"""
:param bin_name: the name of the binary to test for (e.g. vim)
:returns: True or False depending on wether the binary exists
"""
with open(os.devnull) as devnull:
# rc = SP.call(['which', bin_name], stdout=devnull, stderr=devnull)
rc = SP.call(['which', bin_name])
return rc
|
dwwkelly/note | note/util.py | which | python | def which(bin_name):
with open(os.devnull) as devnull:
# rc = SP.call(['which', bin_name], stdout=devnull, stderr=devnull)
rc = SP.call(['which', bin_name])
return rc | :param bin_name: the name of the binary to test for (e.g. vim)
:returns: True or False depending on wether the binary exists | train | https://github.com/dwwkelly/note/blob/b41d5fe1e4a3b67b50285168dd58f903bf219e6c/note/util.py#L49-L59 | null | import subprocess as SP
import os
itemTypes = ["note", "todo", "contact", "place"]
colors = dict()
colors['reset'] = "\033[0m" # reset
colors['hicolor'] = "\033[1m" # hicolor
colors['underline'] = "\033[4m" # underline
colors['invert'] = "\033[7m" # invert foreground and background
colors['foreground black'] = "\033[30m"
colors['foreground red'] = "\033[31m"
colors['foreground green'] = "\033[32m"
colors['foreground yellow'] = "\033[33m"
colors['foreground blue'] = "\033[34m"
colors['foreground magenta'] = "\033[35m"
colors['foreground cyan'] = "\033[36m"
colors['foreground white'] = "\033[37m"
colors['background black'] = "\033[40m"
colors['background red'] = "\033[41m"
colors['background green'] = "\033[42m"
colors['background yellow'] = "\033[43m"
colors['background blue'] = "\033[44m"
colors['background magenta'] = "\033[45m"
colors['background cyan'] = "\033[46m"
colors['background white'] = "\033[47m"
def scrubID(ID):
"""
:param ID: An ID that can be of various types, this is very kludgy
:returns: An integer ID
"""
try:
if type(ID) == list:
return int(ID[0])
elif type(ID) == str:
return int(ID)
elif type(ID) == int:
return ID
elif type(ID) == unicode:
return int(ID)
except ValueError:
return None
|
dwwkelly/note | note/mongo_driver.py | mongoDB.addItem | python | def addItem(self, itemType, itemContents, itemID=None):
if itemType not in self.noteDB.collection_names():
fields = [(ii, pymongo.TEXT) for ii in itemContents]
self.noteDB[itemType].ensure_index(fields)
collection = self.noteDB[itemType]
if itemID is None:
itemContents['timestamps'] = [time.time()]
itemID = self.getNewID()
itemContents["ID"] = itemID
collection.insert(itemContents)
else:
_id = collection.find_one({"ID": itemID})["_id"]
timestamps = collection.find_one({"ID": itemID})["timestamps"]
timestamps.append(time.time())
itemContents["timestamps"] = timestamps
itemContents["ID"] = itemID
collection.update({"_id": _id}, itemContents)
return itemID | :param str itemType: The type of the item, note, place, todo
:param dict itemContents: A dictionary of the item contents
:param int itemID: When editing a note, send the ID along with it | train | https://github.com/dwwkelly/note/blob/b41d5fe1e4a3b67b50285168dd58f903bf219e6c/note/mongo_driver.py#L51-L77 | [
"def getNewID(self):\n \"\"\"\n\n :desc: Get a new ID by either incrementing the currentMax ID\n or using an unusedID\n :returns: A new, valid, ID\n :rval: int\n \"\"\"\n idCollection = self.noteDB['IDs']\n query = {\"unusedIDs\": {\"$exists\": True}}\n unusedIDs = idCollection.find_one(query)['unusedIDs']\n\n if not unusedIDs:\n query = {\"currentMax\": {\"$exists\": True}}\n ID = idCollection.find_one(query)['currentMax'] + 1\n idCollection.update({\"currentMax\": ID - 1}, {\"currentMax\": ID})\n else:\n query = {\"unusedIDs\": {\"$exists\": True}}\n unusedIDs = idCollection.find_one(query)['unusedIDs']\n ID = min(unusedIDs)\n unusedIDs.remove(ID)\n idCollection.update({\"unusedIDs\": {\"$exists\": True}},\n {\"$set\": {\"unusedIDs\": unusedIDs}})\n\n return int(ID)\n"
] | class mongoDB(dbBaseClass):
def __init__(self, dbName, uri=None):
"""
:desc: Initialize the database driver
:param str dbName: The name of the database in mongo
:param str uri: The Mongo URI to use
"""
self.dbName = dbName
try:
self.client = pymongo.MongoClient(uri)
except pymongo.errors.ConnectionFailure:
print 'ERROR: Cannot open connection to database'
sys.exit(1)
# Make sure that text search is enabled for this database
adminDB = self.client['admin']
cmd = {"getParameter": 1, "textSearchEnabled": 1}
textSearchEnabled = adminDB.command(cmd)['textSearchEnabled']
if not textSearchEnabled:
adminDB.command({"setParameter": 1, "textSearchEnabled": "true"})
# Create database
self.noteDB = self.client[self.dbName]
# Initialize
query = ({"currentMax": {"$exists": True}})
if self.noteDB.IDs.find(query).count() == 0:
self.noteDB['IDs'].insert({"currentMax": 0})
query = {"unusedIDs": {"$exists": True}}
if self.noteDB.IDs.find(query).count() == 0:
self.noteDB['IDs'].insert({"unusedIDs": []})
if 'label' not in self.noteDB.collection_names():
self.noteDB.create_collection('label')
def addLabel(self, label_name, ID):
"""
"""
if self.getIDByLabel(label_name) is not None:
return None
doc = {"name": label_name, "ID": ID}
self.noteDB['label'].insert(doc)
del doc['_id']
return doc
def getIDByLabel(self, label_name):
"""
"""
doc = {"name": label_name}
r = self.noteDB['label'].find_one(doc)
if r is None:
return None
if 'ID' in r:
return r['ID']
else:
return None
def deleteLabel(self, label_name):
"""
"""
doc = {"name": label_name}
self.noteDB['label'].remove(doc)
return
def getNewID(self):
"""
:desc: Get a new ID by either incrementing the currentMax ID
or using an unusedID
:returns: A new, valid, ID
:rval: int
"""
idCollection = self.noteDB['IDs']
query = {"unusedIDs": {"$exists": True}}
unusedIDs = idCollection.find_one(query)['unusedIDs']
if not unusedIDs:
query = {"currentMax": {"$exists": True}}
ID = idCollection.find_one(query)['currentMax'] + 1
idCollection.update({"currentMax": ID - 1}, {"currentMax": ID})
else:
query = {"unusedIDs": {"$exists": True}}
unusedIDs = idCollection.find_one(query)['unusedIDs']
ID = min(unusedIDs)
unusedIDs.remove(ID)
idCollection.update({"unusedIDs": {"$exists": True}},
{"$set": {"unusedIDs": unusedIDs}})
return int(ID)
def getItem(self, itemID):
"""
:desc: Given an ID return the note JSON object
{u'note': u'note8',
u'ID': 3.0,
u'tags': [u'8'],
u'timestamps': [1381719620.315899]}
:param int itemID: The item ID, an integer
:returns: The matching note
:rval: int
"""
collections = self.get_data_collections()
itemID = scrubID(itemID)
for coll in collections:
note = self.noteDB[coll].find_one({"ID": itemID})
if note is not None:
del note["_id"]
note['type'] = coll
break
return note
def getAllItemTypes(self):
"""
:desc: Fetches a list of item types
:returns: A list of item types:
:rval: list
"""
collections = self.noteDB.collection_names()
return collections
def getItemType(self, itemID):
"""
:desc: Given an itemID, return the "type" i.e. the collection
it belongs to.
:param int itemID: The item ID, an integer
:returns: The note type
:rval: str
"""
collections = self.getAllItemTypes()
for coll in collections:
note = self.noteDB[coll].find_one({"ID": itemID})
if note is not None:
return coll
def searchForItem(self, searchInfo, resultLimit=20, sortBy="relevance"):
"""
:desc: Given a search term returns a list of results that match
that term:
[{u'score': 5.5,
u'obj': {u'note': u'note8',
u'ID': 3.0,
u'timestamps': [1381719620.315899]}}]
:param str searchInfo: The search term
:returns: A list of matching notes
:rval: list
"""
searchResults = []
colls = self.get_data_collections()
proj = {"_id": 0}
for coll in colls:
res = self.noteDB.command("text",
coll,
search=searchInfo,
project=proj,
limit=resultLimit)['results']
for ii in res:
ii['type'] = coll
searchResults.extend(res)
if sortBy.lower() == "date":
k = (lambda x: max(x['obj']['timestamps']))
searchResults = sorted(searchResults, key=k)
elif sortBy.lower() == "id":
k = (lambda x: x['obj']['ID'])
searchResults = sorted(searchResults, key=k)
return searchResults
def deleteItem(self, itemID):
"""
:desc: Deletes item with ID = itemID, takes care of IDs collection
:param itemID: The item ID to delete
:type itemID: int
:raises: ValueError
:returns ID: The ID of the deleted item
:rval: int
"""
collections = self.get_data_collections()
query = {"currentMax": {"$exists": True}}
currentMax = self.noteDB["IDs"].find_one(query)['currentMax']
query = {"unusedIDs": {"$exists": True}}
unusedIDs = self.noteDB['IDs'].find_one(query)['unusedIDs']
if (itemID > currentMax) or (itemID in unusedIDs):
raise ValueError("ID {0} does not exist".format(itemID))
# Find document with ID
for coll in collections:
self.noteDB[coll].remove({"ID": itemID})
if currentMax == itemID:
self.noteDB['IDs'].update({"currentMax": currentMax},
{"currentMax": currentMax - 1})
else:
unusedIDs.append(itemID)
self.noteDB['IDs'].update({"unusedIDs": {"$exists": True}},
{"unusedIDs": unusedIDs})
return itemID
def getDone(self, done):
"""
:desc: Fetches a list of all the done ToDs
:param bool done: done or undone?
:returns: A list of matching IDs
:rval: list
"""
doneItems = self.noteDB['todo'] \
.find({"done": done}) \
.sort("date", pymongo.DESCENDING)
IDs = [ii['ID'] for ii in doneItems]
return IDs
def makeBackupFile(self, dstPath, fileName):
"""
:param str dstPath: The destination path of the backup file
:param str fileName: The filename to use
"""
with open(os.devnull) as devnull:
SP.call(['mongodump', '--db', self.dbName, '--out', dstPath],
stdout=devnull,
stderr=devnull)
SP.call(['zip',
'-r',
os.path.join(dstPath, fileName),
os.path.join(dstPath, self.dbName)],
stdout=devnull,
stderr=devnull)
SP.call(['rm', '-rf', os.path.join(dstPath, self.dbName)])
def getByTime(self, startTime=None, endTime=None):
"""
:desc: Get all the notes in the given time window
:param int startTime: The begining of the window
:param int endTime: The end of the window
:returns: A list of IDs
:ravl: list
"""
collections = self.get_data_collections()
if startTime is not None:
startTime = float(startTime)
if endTime is not None:
endTime = float(endTime)
if startTime is not None and endTime is not None:
timeQuery = {"$and": [{"timestamps": {"$gt": startTime}},
{"timestamps": {"$lt": endTime}}]}
elif startTime is not None and endTime is None:
timeQuery = {"timestamps": {"$gt": startTime}}
elif startTime is None and endTime is not None:
timeQuery = {"timestamps": {"$lt": endTime}}
IDs = []
for coll in collections:
docs = self.noteDB[coll].find(timeQuery, {"ID": 1, "_id": 0})
for doc in docs:
IDs.append(doc['ID'])
return IDs
def verify(self):
"""
:desc: Verifies the integrity of the database, specifically checks
the values for unusedIDs and currentMax
:returns: A boolean indicating whether the database is valid or
not
:rval: bool
"""
collections = self.get_data_collections()
allIDs = []
for coll in collections:
IDs = self.noteDB[coll].find({"ID": {"$exists": True}},
{"ID": 1, "_id": 0})
for ID in IDs:
allIDs.append(int(ID["ID"]))
query = {"currentMax": {"$exists": True}}
maxID = int(self.noteDB['IDs'].find_one(query)["currentMax"])
query = {"unusedIDs": {"$exists": True}}
unusedIDs = self.noteDB['IDs'].find_one(query)["unusedIDs"]
unusedIDs = [int(ii) for ii in unusedIDs]
unusedIDsMatch = True
for ID in allIDs:
if ID in unusedIDs:
unusedIDsMatch = False
maxIDMatch = True
if maxID is not max(allIDs):
maxIDMatch = False
if maxIDMatch and unusedIDsMatch:
print "Database is valid"
elif not maxIDMatch and not unusedIDsMatch:
print "Database not valid, max ID and unused IDs are incorrent"
elif not maxIDMatch:
print "Database not valid, max ID is incorrent"
elif not unusedIDsMatch:
print "Database not valid, unusedIDs is incorrent"
def get_data_collections(self):
collections = self.noteDB.collection_names()
collections.remove(u'system.indexes')
collections.remove(u'IDs')
collections.remove(u'label')
return collections
|
dwwkelly/note | note/mongo_driver.py | mongoDB.getItem | python | def getItem(self, itemID):
collections = self.get_data_collections()
itemID = scrubID(itemID)
for coll in collections:
note = self.noteDB[coll].find_one({"ID": itemID})
if note is not None:
del note["_id"]
note['type'] = coll
break
return note | :desc: Given an ID return the note JSON object
{u'note': u'note8',
u'ID': 3.0,
u'tags': [u'8'],
u'timestamps': [1381719620.315899]}
:param int itemID: The item ID, an integer
:returns: The matching note
:rval: int | train | https://github.com/dwwkelly/note/blob/b41d5fe1e4a3b67b50285168dd58f903bf219e6c/note/mongo_driver.py#L144-L166 | [
"def scrubID(ID):\n \"\"\"\n :param ID: An ID that can be of various types, this is very kludgy\n :returns: An integer ID\n \"\"\"\n\n try:\n if type(ID) == list:\n return int(ID[0])\n elif type(ID) == str:\n return int(ID)\n elif type(ID) == int:\n return ID\n elif type(ID) == unicode:\n return int(ID)\n\n except ValueError:\n return None\n",
"def get_data_collections(self):\n collections = self.noteDB.collection_names()\n collections.remove(u'system.indexes')\n collections.remove(u'IDs')\n collections.remove(u'label')\n\n return collections\n"
] | class mongoDB(dbBaseClass):
def __init__(self, dbName, uri=None):
    """
    :desc: Initialize the database driver
    :param str dbName: The name of the database in mongo
    :param str uri: The Mongo URI to use (None lets pymongo fall back
                    to its default localhost connection)
    """
    self.dbName = dbName
    try:
        self.client = pymongo.MongoClient(uri)
    except pymongo.errors.ConnectionFailure:
        # Without a database connection nothing else can work, so
        # report the problem and abort the whole process.
        print 'ERROR: Cannot open connection to database'
        sys.exit(1)
    # Make sure that text search is enabled for this database
    # (required by the "text" command used in searchForItem).
    adminDB = self.client['admin']
    cmd = {"getParameter": 1, "textSearchEnabled": 1}
    textSearchEnabled = adminDB.command(cmd)['textSearchEnabled']
    if not textSearchEnabled:
        adminDB.command({"setParameter": 1, "textSearchEnabled": "true"})
    # Create database
    self.noteDB = self.client[self.dbName]
    # Initialize the ID-bookkeeping documents on first run:
    # 'currentMax' tracks the highest ID ever handed out and
    # 'unusedIDs' records IDs freed by deletions (for reuse).
    query = ({"currentMax": {"$exists": True}})
    if self.noteDB.IDs.find(query).count() == 0:
        self.noteDB['IDs'].insert({"currentMax": 0})
    query = {"unusedIDs": {"$exists": True}}
    if self.noteDB.IDs.find(query).count() == 0:
        self.noteDB['IDs'].insert({"unusedIDs": []})
    # The label collection maps human-friendly names to note IDs.
    if 'label' not in self.noteDB.collection_names():
        self.noteDB.create_collection('label')
def addItem(self, itemType, itemContents, itemID=None):
    """
    :desc: Insert a new item, or update an existing one when itemID
           is supplied.
    :param str itemType: The type of the item, note, place, todo
    :param dict itemContents: A dictionary of the item contents
    :param int itemID: When editing a note, send the ID along with it
    :returns: The ID of the inserted/updated item
    :rval: int

    NOTE(review): itemContents is mutated in place ('ID' and
    'timestamps' entries are added), so callers see those changes.
    """
    if itemType not in self.noteDB.collection_names():
        # First item of this type: build a full-text index over every
        # field so the text search in searchForItem can find it.
        fields = [(ii, pymongo.TEXT) for ii in itemContents]
        self.noteDB[itemType].ensure_index(fields)
    collection = self.noteDB[itemType]
    if itemID is None:
        # Brand-new item: stamp it with its creation time and a
        # freshly allocated ID.
        itemContents['timestamps'] = [time.time()]
        itemID = self.getNewID()
        itemContents["ID"] = itemID
        collection.insert(itemContents)
    else:
        # Edit: preserve the existing timestamp history, append the
        # time of this modification, then replace the stored document.
        _id = collection.find_one({"ID": itemID})["_id"]
        timestamps = collection.find_one({"ID": itemID})["timestamps"]
        timestamps.append(time.time())
        itemContents["timestamps"] = timestamps
        itemContents["ID"] = itemID
        collection.update({"_id": _id}, itemContents)
    return itemID
def addLabel(self, label_name, ID):
    """Associate *label_name* with the note *ID*.

    :returns: the stored mapping as a dict, or None when the label
              name is already taken
    """
    existing = self.getIDByLabel(label_name)
    if existing is not None:
        # Labels are unique; refuse to overwrite an existing one.
        return None
    mapping = {"name": label_name, "ID": ID}
    self.noteDB['label'].insert(mapping)
    # insert() injects Mongo's '_id'; strip it before returning.
    del mapping['_id']
    return mapping
def getIDByLabel(self, label_name):
    """Look up the note ID mapped to *label_name*.

    :returns: the ID, or None when the label is unknown or the stored
              mapping lacks an 'ID' field
    """
    record = self.noteDB['label'].find_one({"name": label_name})
    if record is None:
        return None
    # .get() covers both the present-key and missing-key cases,
    # returning None for the latter just like the original branches.
    return record.get('ID')
def deleteLabel(self, label_name):
    """Remove the label named *label_name* from the label collection."""
    self.noteDB['label'].remove({"name": label_name})
def getNewID(self):
    """
    :desc: Get a new ID by either incrementing the currentMax ID
           or reusing the smallest unused ID
    :returns: A new, valid, ID
    :rval: int
    """
    idCollection = self.noteDB['IDs']
    query = {"unusedIDs": {"$exists": True}}
    unusedIDs = idCollection.find_one(query)['unusedIDs']
    if not unusedIDs:
        # No holes in the ID space: bump the running maximum.
        query = {"currentMax": {"$exists": True}}
        ID = idCollection.find_one(query)['currentMax'] + 1
        idCollection.update({"currentMax": ID - 1}, {"currentMax": ID})
    else:
        # Recycle the smallest previously-freed ID.
        # FIX: the original re-fetched 'unusedIDs' here even though the
        # list read above is already current -- the duplicate query
        # (an extra database round-trip) has been removed.
        ID = min(unusedIDs)
        unusedIDs.remove(ID)
        idCollection.update({"unusedIDs": {"$exists": True}},
                            {"$set": {"unusedIDs": unusedIDs}})
    return int(ID)
def getAllItemTypes(self):
    """
    :desc: Fetches a list of item types (every collection name,
           including the internal bookkeeping collections)
    :returns: A list of item types
    :rval: list
    """
    return self.noteDB.collection_names()
def getItemType(self, itemID):
    """
    :desc: Given an itemID, return the "type" i.e. the collection
           it belongs to (None when no collection holds it).
    :param int itemID: The item ID, an integer
    :returns: The note type
    :rval: str
    """
    # Probe every collection; the first one containing the ID wins.
    for candidate in self.getAllItemTypes():
        if self.noteDB[candidate].find_one({"ID": itemID}) is not None:
            return candidate
def searchForItem(self, searchInfo, resultLimit=20, sortBy="relevance"):
    """
    :desc: Given a search term returns a list of results that match
           that term:
           [{u'score': 5.5,
             u'obj': {u'note': u'note8',
                      u'ID': 3.0,
                      u'timestamps': [1381719620.315899]}}]
    :param str searchInfo: The search term
    :param int resultLimit: Maximum number of results per collection
    :param str sortBy: "relevance" (Mongo's score order), "date" or "id"
    :returns: A list of matching notes
    :rval: list
    """
    searchResults = []
    colls = self.get_data_collections()
    # Suppress Mongo's internal _id in the projected documents.
    proj = {"_id": 0}
    for coll in colls:
        # NOTE(review): relies on the legacy "text" database command
        # (removed in newer MongoDB releases) -- confirm server version.
        res = self.noteDB.command("text",
                                  coll,
                                  search=searchInfo,
                                  project=proj,
                                  limit=resultLimit)['results']
        for ii in res:
            # Tag each hit with the collection it came from.
            ii['type'] = coll
        searchResults.extend(res)
    if sortBy.lower() == "date":
        # Sort by the latest modification time (ascending).
        k = (lambda x: max(x['obj']['timestamps']))
        searchResults = sorted(searchResults, key=k)
    elif sortBy.lower() == "id":
        k = (lambda x: x['obj']['ID'])
        searchResults = sorted(searchResults, key=k)
    return searchResults
def deleteItem(self, itemID):
    """
    :desc: Deletes item with ID = itemID, takes care of IDs collection
    :param itemID: The item ID to delete
    :type itemID: int
    :raises: ValueError
    :returns: The ID of the deleted item
    :rval: int
    """
    collections = self.get_data_collections()
    query = {"currentMax": {"$exists": True}}
    currentMax = self.noteDB["IDs"].find_one(query)['currentMax']
    query = {"unusedIDs": {"$exists": True}}
    unusedIDs = self.noteDB['IDs'].find_one(query)['unusedIDs']
    # Reject IDs that were never allocated or were already freed.
    if (itemID > currentMax) or (itemID in unusedIDs):
        raise ValueError("ID {0} does not exist".format(itemID))
    # Find document with ID: the item's type is unknown, so remove it
    # from every data collection (only one actually holds it).
    for coll in collections:
        self.noteDB[coll].remove({"ID": itemID})
    if currentMax == itemID:
        # Freed the top ID: shrink the running maximum instead of
        # recording a hole.
        self.noteDB['IDs'].update({"currentMax": currentMax},
                                  {"currentMax": currentMax - 1})
    else:
        # Freed an ID from the middle of the range: remember it so
        # getNewID can recycle it later.
        unusedIDs.append(itemID)
        self.noteDB['IDs'].update({"unusedIDs": {"$exists": True}},
                                  {"unusedIDs": unusedIDs})
    return itemID
def getDone(self, done):
    """
    :desc: Fetches the IDs of every ToDo whose 'done' flag equals
           *done*, ordered by date, newest first
    :param bool done: done or undone?
    :returns: A list of matching IDs
    :rval: list
    """
    cursor = self.noteDB['todo'].find({"done": done})
    cursor = cursor.sort("date", pymongo.DESCENDING)
    return [item['ID'] for item in cursor]
def makeBackupFile(self, dstPath, fileName):
    """
    :desc: Dump the database with mongodump, zip the dump into
           *fileName* under *dstPath*, then remove the raw dump tree.
    :param str dstPath: The destination path of the backup file
    :param str fileName: The filename to use
    """
    # BUG FIX: os.devnull was opened in the default read mode, so the
    # children's redirected stdout/stderr pointed at a read-only file
    # descriptor and their writes could fail; open it for writing.
    with open(os.devnull, 'w') as devnull:
        SP.call(['mongodump', '--db', self.dbName, '--out', dstPath],
                stdout=devnull,
                stderr=devnull)
        SP.call(['zip',
                 '-r',
                 os.path.join(dstPath, fileName),
                 os.path.join(dstPath, self.dbName)],
                stdout=devnull,
                stderr=devnull)
        # Clean up the unzipped dump directory.
        SP.call(['rm', '-rf', os.path.join(dstPath, self.dbName)])
def getByTime(self, startTime=None, endTime=None):
    """
    :desc: Get all the notes in the given time window
    :param int startTime: The beginning of the window (inclusive lower
                          bound, exclusive in Mongo's $gt sense)
    :param int endTime: The end of the window
    :returns: A list of IDs
    :rval: list
    """
    collections = self.get_data_collections()
    # Normalize to floats so they compare correctly with the stored
    # time.time() timestamps.
    if startTime is not None:
        startTime = float(startTime)
    if endTime is not None:
        endTime = float(endTime)
    if startTime is not None and endTime is not None:
        timeQuery = {"$and": [{"timestamps": {"$gt": startTime}},
                              {"timestamps": {"$lt": endTime}}]}
    elif startTime is not None:
        timeQuery = {"timestamps": {"$gt": startTime}}
    elif endTime is not None:
        timeQuery = {"timestamps": {"$lt": endTime}}
    else:
        # BUG FIX: with no bounds the original left timeQuery undefined
        # and raised NameError; match every timestamped document instead.
        timeQuery = {"timestamps": {"$exists": True}}
    IDs = []
    for coll in collections:
        docs = self.noteDB[coll].find(timeQuery, {"ID": 1, "_id": 0})
        for doc in docs:
            IDs.append(doc['ID'])
    return IDs
def verify(self):
    """
    :desc: Verifies the integrity of the database, specifically checks
           the values for unusedIDs and currentMax
    :returns: A boolean indicating whether the database is valid or
              not
    :rval: bool
    """
    collections = self.get_data_collections()
    allIDs = []
    for coll in collections:
        IDs = self.noteDB[coll].find({"ID": {"$exists": True}},
                                     {"ID": 1, "_id": 0})
        for ID in IDs:
            allIDs.append(int(ID["ID"]))
    query = {"currentMax": {"$exists": True}}
    maxID = int(self.noteDB['IDs'].find_one(query)["currentMax"])
    query = {"unusedIDs": {"$exists": True}}
    unusedIDs = self.noteDB['IDs'].find_one(query)["unusedIDs"]
    unusedIDs = set(int(ii) for ii in unusedIDs)
    # An allocated ID must never also be listed as unused.
    unusedIDsMatch = not unusedIDs.intersection(allIDs)
    # BUG FIX: the original used 'maxID is not max(allIDs)', which
    # tests object identity and is unreliable for ints; compare values.
    maxIDMatch = maxID == max(allIDs)
    # Messages also fix the original's "incorrent" typos.
    if maxIDMatch and unusedIDsMatch:
        print("Database is valid")
    elif not maxIDMatch and not unusedIDsMatch:
        print("Database not valid, max ID and unused IDs are incorrect")
    elif not maxIDMatch:
        print("Database not valid, max ID is incorrect")
    else:
        print("Database not valid, unusedIDs is incorrect")
    # BUG FIX: the docstring promises a bool but the original
    # implicitly returned None; return the verdict.
    return maxIDMatch and unusedIDsMatch
def get_data_collections(self):
    """Return the names of the collections that hold actual note data.

    Filters out Mongo's internal index collection and this driver's
    bookkeeping collections ('IDs' and 'label').
    """
    data_colls = self.noteDB.collection_names()
    # list.remove() mirrors the original behavior: each bookkeeping
    # collection is expected to be present exactly once.
    for reserved in (u'system.indexes', u'IDs', u'label'):
        data_colls.remove(reserved)
    return data_colls
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.