repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
crm416/semantic | semantic/solver.py | MathService._calculate | python | def _calculate(numbers, symbols):
if len(numbers) is 1:
return numbers[0]
precedence = [[pow], [mul, div], [add, sub]]
# Find most important operation
for op_group in precedence:
for i, op in enumerate(symbols):
if op in op_group:
# Apply operation
a = numbers[i]
b = numbers[i + 1]
result = MathService._applyBinary(a, b, op)
new_numbers = numbers[:i] + [result] + numbers[i + 2:]
new_symbols = symbols[:i] + symbols[i + 1:]
return MathService._calculate(new_numbers, new_symbols) | Calculates a final value given a set of numbers and symbols. | train | https://github.com/crm416/semantic/blob/46deb8fefb3ea58aad2fedc8d0d62f3ee254b8fe/semantic/solver.py#L126-L144 | [
"def _applyBinary(a, b, op):\n a = float(a)\n b = float(b)\n return op(a, b)\n",
"def _calculate(numbers, symbols):\n \"\"\"Calculates a final value given a set of numbers and symbols.\"\"\"\n if len(numbers) is 1:\n return numbers[0]\n\n precedence = [[pow], [mul, div], [add, sub]]\n\n # Find most important operation\n for op_group in precedence:\n for i, op in enumerate(symbols):\n if op in op_group:\n # Apply operation\n a = numbers[i]\n b = numbers[i + 1]\n result = MathService._applyBinary(a, b, op)\n new_numbers = numbers[:i] + [result] + numbers[i + 2:]\n new_symbols = symbols[:i] + symbols[i + 1:]\n\n return MathService._calculate(new_numbers, new_symbols)\n"
] | class MathService(object):
__constants__ = {
'e': e,
'E': e,
'EE': e,
'pi': pi,
'pie': pi
}
__unaryOperators__ = {
'log': log,
'sine': sin,
'sin': sin,
'cosine': cos,
'cos': cos,
'tan': tan,
'tangent': tan,
'arcsine': asin,
'arcsin': asin,
'asin': asin,
'arccosine': acos,
'arccos': acos,
'acos': acos,
'arctanget': atan,
'arctan': atan,
'atan': atan,
'sqrt': sqrt
}
__binaryOperators__ = {
'plus': add,
'add': add,
'sum': add,
'minus': sub,
'sub': sub,
'subtract': sub,
'less': sub,
'over': div,
'divide': div,
'times': mul,
'multiply': mul,
'to': pow
}
@staticmethod
def _applyBinary(a, b, op):
a = float(a)
b = float(b)
return op(a, b)
@staticmethod
def _applyUnary(a, op):
a = float(a)
return op(a)
@staticmethod
def _preprocess(inp):
"""Revise wording to match canonical and expected forms."""
inp = re.sub(r'(\b)a(\b)', r'\g<1>one\g<2>', inp)
inp = re.sub(r'to the (.*) power', r'to \g<1>', inp)
inp = re.sub(r'to the (.*?)(\b)', r'to \g<1>\g<2>', inp)
inp = re.sub(r'log of', r'log', inp)
inp = re.sub(r'(square )?root( of)?', r'sqrt', inp)
inp = re.sub(r'squared', r'to two', inp)
inp = re.sub(r'cubed', r'to three', inp)
inp = re.sub(r'divided?( by)?', r'divide', inp)
inp = re.sub(r'(\b)over(\b)', r'\g<1>divide\g<2>', inp)
inp = re.sub(r'(\b)EE(\b)', r'\g<1>e\g<2>', inp)
inp = re.sub(r'(\b)E(\b)', r'\g<1>e\g<2>', inp)
inp = re.sub(r'(\b)pie(\b)', r'\g<1>pi\g<2>', inp)
inp = re.sub(r'(\b)PI(\b)', r'\g<1>pi\g<2>', inp)
def findImplicitMultiplications(inp):
"""Replace omitted 'times' references."""
def findConstantMultiplications(inp):
split = inp.split(' ')
revision = ""
converter = NumberService()
for i, w in enumerate(split):
if i > 0 and w in MathService.__constants__:
if converter.isValid(split[i - 1]):
revision += " times"
if not revision:
revision = w
else:
revision += " " + w
return revision
def findUnaryMultiplications(inp):
split = inp.split(' ')
revision = ""
for i, w in enumerate(split):
if i > 0 and w in MathService.__unaryOperators__:
last_op = split[i - 1]
binary = last_op in MathService.__binaryOperators__
unary = last_op in MathService.__unaryOperators__
if last_op and not (binary or unary):
revision += " times"
if not revision:
revision = w
else:
revision += " " + w
return revision
return findUnaryMultiplications(findConstantMultiplications(inp))
return findImplicitMultiplications(inp)
@staticmethod
def parseEquation(self, inp):
"""Solves the equation specified by the input string.
Args:
inp (str): An equation, specified in words, containing some
combination of numbers, binary, and unary operations.
Returns:
The floating-point result of carrying out the computation.
"""
inp = MathService._preprocess(inp)
split = inp.split(' ')
# Recursive call on unary operators
for i, w in enumerate(split):
if w in self.__unaryOperators__:
op = self.__unaryOperators__[w]
# Split equation into halves
eq1 = ' '.join(split[:i])
eq2 = ' '.join(split[i + 1:])
# Calculate second half
result = MathService._applyUnary(self.parseEquation(eq2), op)
return self.parseEquation(eq1 + " " + str(result))
def extractNumbersAndSymbols(inp):
numbers = []
symbols = []
# Divide into values (numbers), operators (symbols)
next_number = ""
for w in inp.split(' '):
if w in self.__binaryOperators__:
symbols.append(self.__binaryOperators__[w])
if next_number:
numbers.append(next_number)
next_number = ""
else:
if next_number:
next_number += " "
next_number += w
if next_number:
numbers.append(next_number)
# Cast numbers from words to integers
def convert(n):
if n in self.__constants__:
return self.__constants__[n]
converter = NumberService()
return converter.parse(n)
numbers = [convert(n) for n in numbers]
return numbers, symbols
numbers, symbols = extractNumbersAndSymbols(inp)
return MathService._calculate(numbers, symbols)
|
crm416/semantic | semantic/solver.py | MathService.parseEquation | python | def parseEquation(self, inp):
inp = MathService._preprocess(inp)
split = inp.split(' ')
# Recursive call on unary operators
for i, w in enumerate(split):
if w in self.__unaryOperators__:
op = self.__unaryOperators__[w]
# Split equation into halves
eq1 = ' '.join(split[:i])
eq2 = ' '.join(split[i + 1:])
# Calculate second half
result = MathService._applyUnary(self.parseEquation(eq2), op)
return self.parseEquation(eq1 + " " + str(result))
def extractNumbersAndSymbols(inp):
numbers = []
symbols = []
# Divide into values (numbers), operators (symbols)
next_number = ""
for w in inp.split(' '):
if w in self.__binaryOperators__:
symbols.append(self.__binaryOperators__[w])
if next_number:
numbers.append(next_number)
next_number = ""
else:
if next_number:
next_number += " "
next_number += w
if next_number:
numbers.append(next_number)
# Cast numbers from words to integers
def convert(n):
if n in self.__constants__:
return self.__constants__[n]
converter = NumberService()
return converter.parse(n)
numbers = [convert(n) for n in numbers]
return numbers, symbols
numbers, symbols = extractNumbersAndSymbols(inp)
return MathService._calculate(numbers, symbols) | Solves the equation specified by the input string.
Args:
inp (str): An equation, specified in words, containing some
combination of numbers, binary, and unary operations.
Returns:
The floating-point result of carrying out the computation. | train | https://github.com/crm416/semantic/blob/46deb8fefb3ea58aad2fedc8d0d62f3ee254b8fe/semantic/solver.py#L146-L209 | [
"def _applyUnary(a, op):\n a = float(a)\n return op(a)\n",
"def _preprocess(inp):\n \"\"\"Revise wording to match canonical and expected forms.\"\"\"\n inp = re.sub(r'(\\b)a(\\b)', r'\\g<1>one\\g<2>', inp)\n inp = re.sub(r'to the (.*) power', r'to \\g<1>', inp)\n inp = re.sub(r'to the (.*?)(\\b)', r'to \\g<1>\\g<2>', inp)\n inp = re.sub(r'log of', r'log', inp)\n inp = re.sub(r'(square )?root( of)?', r'sqrt', inp)\n inp = re.sub(r'squared', r'to two', inp)\n inp = re.sub(r'cubed', r'to three', inp)\n inp = re.sub(r'divided?( by)?', r'divide', inp)\n inp = re.sub(r'(\\b)over(\\b)', r'\\g<1>divide\\g<2>', inp)\n inp = re.sub(r'(\\b)EE(\\b)', r'\\g<1>e\\g<2>', inp)\n inp = re.sub(r'(\\b)E(\\b)', r'\\g<1>e\\g<2>', inp)\n inp = re.sub(r'(\\b)pie(\\b)', r'\\g<1>pi\\g<2>', inp)\n inp = re.sub(r'(\\b)PI(\\b)', r'\\g<1>pi\\g<2>', inp)\n\n def findImplicitMultiplications(inp):\n \"\"\"Replace omitted 'times' references.\"\"\"\n\n def findConstantMultiplications(inp):\n split = inp.split(' ')\n revision = \"\"\n\n converter = NumberService()\n for i, w in enumerate(split):\n if i > 0 and w in MathService.__constants__:\n if converter.isValid(split[i - 1]):\n revision += \" times\"\n if not revision:\n revision = w\n else:\n revision += \" \" + w\n\n return revision\n\n def findUnaryMultiplications(inp):\n split = inp.split(' ')\n revision = \"\"\n\n for i, w in enumerate(split):\n if i > 0 and w in MathService.__unaryOperators__:\n last_op = split[i - 1]\n\n binary = last_op in MathService.__binaryOperators__\n unary = last_op in MathService.__unaryOperators__\n\n if last_op and not (binary or unary):\n revision += \" times\"\n if not revision:\n revision = w\n else:\n revision += \" \" + w\n\n return revision\n\n return findUnaryMultiplications(findConstantMultiplications(inp))\n\n return findImplicitMultiplications(inp)\n",
"def _calculate(numbers, symbols):\n \"\"\"Calculates a final value given a set of numbers and symbols.\"\"\"\n if len(numbers) is 1:\n return numbers[0]\n\n precedence = [[pow], [mul, div], [add, sub]]\n\n # Find most important operation\n for op_group in precedence:\n for i, op in enumerate(symbols):\n if op in op_group:\n # Apply operation\n a = numbers[i]\n b = numbers[i + 1]\n result = MathService._applyBinary(a, b, op)\n new_numbers = numbers[:i] + [result] + numbers[i + 2:]\n new_symbols = symbols[:i] + symbols[i + 1:]\n\n return MathService._calculate(new_numbers, new_symbols)\n",
"def parseEquation(self, inp):\n \"\"\"Solves the equation specified by the input string.\n\n Args:\n inp (str): An equation, specified in words, containing some\n combination of numbers, binary, and unary operations.\n\n Returns:\n The floating-point result of carrying out the computation.\n \"\"\"\n inp = MathService._preprocess(inp)\n split = inp.split(' ')\n\n # Recursive call on unary operators\n for i, w in enumerate(split):\n if w in self.__unaryOperators__:\n op = self.__unaryOperators__[w]\n\n # Split equation into halves\n eq1 = ' '.join(split[:i])\n eq2 = ' '.join(split[i + 1:])\n\n # Calculate second half\n result = MathService._applyUnary(self.parseEquation(eq2), op)\n\n return self.parseEquation(eq1 + \" \" + str(result))\n\n def extractNumbersAndSymbols(inp):\n numbers = []\n symbols = []\n\n # Divide into values (numbers), operators (symbols)\n next_number = \"\"\n for w in inp.split(' '):\n if w in self.__binaryOperators__:\n symbols.append(self.__binaryOperators__[w])\n\n if next_number:\n numbers.append(next_number)\n next_number = \"\"\n\n else:\n if next_number:\n next_number += \" \"\n next_number += w\n\n if next_number:\n numbers.append(next_number)\n\n # Cast numbers from words to integers\n def convert(n):\n if n in self.__constants__:\n return self.__constants__[n]\n\n converter = NumberService()\n return converter.parse(n)\n\n numbers = [convert(n) for n in numbers]\n\n return numbers, symbols\n\n numbers, symbols = extractNumbersAndSymbols(inp)\n\n return MathService._calculate(numbers, symbols)\n",
"def extractNumbersAndSymbols(inp):\n numbers = []\n symbols = []\n\n # Divide into values (numbers), operators (symbols)\n next_number = \"\"\n for w in inp.split(' '):\n if w in self.__binaryOperators__:\n symbols.append(self.__binaryOperators__[w])\n\n if next_number:\n numbers.append(next_number)\n next_number = \"\"\n\n else:\n if next_number:\n next_number += \" \"\n next_number += w\n\n if next_number:\n numbers.append(next_number)\n\n # Cast numbers from words to integers\n def convert(n):\n if n in self.__constants__:\n return self.__constants__[n]\n\n converter = NumberService()\n return converter.parse(n)\n\n numbers = [convert(n) for n in numbers]\n\n return numbers, symbols\n"
] | class MathService(object):
__constants__ = {
'e': e,
'E': e,
'EE': e,
'pi': pi,
'pie': pi
}
__unaryOperators__ = {
'log': log,
'sine': sin,
'sin': sin,
'cosine': cos,
'cos': cos,
'tan': tan,
'tangent': tan,
'arcsine': asin,
'arcsin': asin,
'asin': asin,
'arccosine': acos,
'arccos': acos,
'acos': acos,
'arctanget': atan,
'arctan': atan,
'atan': atan,
'sqrt': sqrt
}
__binaryOperators__ = {
'plus': add,
'add': add,
'sum': add,
'minus': sub,
'sub': sub,
'subtract': sub,
'less': sub,
'over': div,
'divide': div,
'times': mul,
'multiply': mul,
'to': pow
}
@staticmethod
def _applyBinary(a, b, op):
a = float(a)
b = float(b)
return op(a, b)
@staticmethod
def _applyUnary(a, op):
a = float(a)
return op(a)
@staticmethod
def _preprocess(inp):
"""Revise wording to match canonical and expected forms."""
inp = re.sub(r'(\b)a(\b)', r'\g<1>one\g<2>', inp)
inp = re.sub(r'to the (.*) power', r'to \g<1>', inp)
inp = re.sub(r'to the (.*?)(\b)', r'to \g<1>\g<2>', inp)
inp = re.sub(r'log of', r'log', inp)
inp = re.sub(r'(square )?root( of)?', r'sqrt', inp)
inp = re.sub(r'squared', r'to two', inp)
inp = re.sub(r'cubed', r'to three', inp)
inp = re.sub(r'divided?( by)?', r'divide', inp)
inp = re.sub(r'(\b)over(\b)', r'\g<1>divide\g<2>', inp)
inp = re.sub(r'(\b)EE(\b)', r'\g<1>e\g<2>', inp)
inp = re.sub(r'(\b)E(\b)', r'\g<1>e\g<2>', inp)
inp = re.sub(r'(\b)pie(\b)', r'\g<1>pi\g<2>', inp)
inp = re.sub(r'(\b)PI(\b)', r'\g<1>pi\g<2>', inp)
def findImplicitMultiplications(inp):
"""Replace omitted 'times' references."""
def findConstantMultiplications(inp):
split = inp.split(' ')
revision = ""
converter = NumberService()
for i, w in enumerate(split):
if i > 0 and w in MathService.__constants__:
if converter.isValid(split[i - 1]):
revision += " times"
if not revision:
revision = w
else:
revision += " " + w
return revision
def findUnaryMultiplications(inp):
split = inp.split(' ')
revision = ""
for i, w in enumerate(split):
if i > 0 and w in MathService.__unaryOperators__:
last_op = split[i - 1]
binary = last_op in MathService.__binaryOperators__
unary = last_op in MathService.__unaryOperators__
if last_op and not (binary or unary):
revision += " times"
if not revision:
revision = w
else:
revision += " " + w
return revision
return findUnaryMultiplications(findConstantMultiplications(inp))
return findImplicitMultiplications(inp)
@staticmethod
def _calculate(numbers, symbols):
"""Calculates a final value given a set of numbers and symbols."""
if len(numbers) is 1:
return numbers[0]
precedence = [[pow], [mul, div], [add, sub]]
# Find most important operation
for op_group in precedence:
for i, op in enumerate(symbols):
if op in op_group:
# Apply operation
a = numbers[i]
b = numbers[i + 1]
result = MathService._applyBinary(a, b, op)
new_numbers = numbers[:i] + [result] + numbers[i + 2:]
new_symbols = symbols[:i] + symbols[i + 1:]
return MathService._calculate(new_numbers, new_symbols)
|
crm416/semantic | semantic/numbers.py | NumberService.parse | python | def parse(self, words):
def exact(words):
"""If already represented as float or int, convert."""
try:
return float(words)
except:
return None
guess = exact(words)
if guess is not None:
return guess
split = words.split(' ')
# Replace final ordinal/fraction with number
if split[-1] in self.__fractions__:
split[-1] = self.__fractions__[split[-1]]
elif split[-1] in self.__ordinals__:
split[-1] = self.__ordinals__[split[-1]]
parsed_ordinals = ' '.join(split)
return self.parseFloat(parsed_ordinals) | A general method for parsing word-representations of numbers.
Supports floats and integers.
Args:
words (str): Description of an arbitrary number.
Returns:
A double representation of the words. | train | https://github.com/crm416/semantic/blob/46deb8fefb3ea58aad2fedc8d0d62f3ee254b8fe/semantic/numbers.py#L91-L122 | [
"def parseFloat(self, words):\n \"\"\"Convert a floating-point number described in words to a double.\n\n Supports two kinds of descriptions: those with a 'point' (e.g.,\n \"one point two five\") and those with a fraction (e.g., \"one and\n a quarter\").\n\n Args:\n words (str): Description of the floating-point number.\n\n Returns:\n A double representation of the words.\n \"\"\"\n def pointFloat(words):\n m = re.search(r'(.*) point (.*)', words)\n if m:\n whole = m.group(1)\n frac = m.group(2)\n total = 0.0\n coeff = 0.10\n for digit in frac.split(' '):\n total += coeff * self.parse(digit)\n coeff /= 10.0\n\n return self.parseInt(whole) + total\n return None\n\n def fractionFloat(words):\n m = re.search(r'(.*) and (.*)', words)\n if m:\n whole = self.parseInt(m.group(1))\n frac = m.group(2)\n\n # Replace plurals\n frac = re.sub(r'(\\w+)s(\\b)', '\\g<1>\\g<2>', frac)\n\n # Convert 'a' to 'one' (e.g., 'a third' to 'one third')\n frac = re.sub(r'(\\b)a(\\b)', '\\g<1>one\\g<2>', frac)\n\n split = frac.split(' ')\n\n # Split fraction into num (regular integer), denom (ordinal)\n num = split[:1]\n denom = split[1:]\n\n while denom:\n try:\n # Test for valid num, denom\n num_value = self.parse(' '.join(num))\n denom_value = self.parse(' '.join(denom))\n return whole + float(num_value) / denom_value\n except:\n # Add another word to num\n num += denom[:1]\n denom = denom[1:]\n return None\n\n # Extract \"one point two five\"-type float\n result = pointFloat(words)\n if result:\n return result\n\n # Extract \"one and a quarter\"-type float\n result = fractionFloat(words)\n if result:\n return result\n\n # Parse as integer\n return self.parseInt(words)\n",
"def exact(words):\n \"\"\"If already represented as float or int, convert.\"\"\"\n try:\n return float(words)\n except:\n return None\n"
] | class NumberService(object):
__small__ = {
'zero': 0,
'one': 1,
'two': 2,
'three': 3,
'four': 4,
'five': 5,
'six': 6,
'seven': 7,
'eight': 8,
'nine': 9,
'ten': 10,
'eleven': 11,
'twelve': 12,
'thirteen': 13,
'fourteen': 14,
'fifteen': 15,
'sixteen': 16,
'seventeen': 17,
'eighteen': 18,
'nineteen': 19,
'twenty': 20,
'thirty': 30,
'forty': 40,
'fifty': 50,
'sixty': 60,
'seventy': 70,
'eighty': 80,
'ninety': 90
}
__magnitude__ = {
'thousand': 1000,
'million': 1000000,
'billion': 1000000000,
'trillion': 1000000000000,
'quadrillion': 1000000000000000,
'quintillion': 1000000000000000000,
'sextillion': 1000000000000000000000,
'septillion': 1000000000000000000000000,
'octillion': 1000000000000000000000000000,
'nonillion': 1000000000000000000000000000000,
'decillion': 1000000000000000000000000000000000,
}
__ordinals__ = {
'first': 'one',
'second': 'two',
'third': 'three',
'fourth': 'four',
'fifth': 'five',
'sixth': 'six',
'seventh': 'seventh',
'eighth': 'eight',
'ninth': 'nine',
'tenth': 'ten',
'eleventh': 'eleven',
'twelth': 'twelve',
'thirteenth': 'thirteen',
'fifteenth': 'fifteen',
'sixteenth': 'sixteen',
'seventeenth': 'seventeen',
'eighteenth': 'eighteen',
'nineteenth': 'nineteen',
'twentieth': 'twenty',
'thirtieth': 'thirty',
'fortieth': 'forty',
'fiftieth': 'fifty',
'sixtieth': 'sixty',
'seventieth': 'seventy',
'eightieth': 'eighty',
'ninetieth': 'ninety',
'hundredth': 'hundred'
}
__fractions__ = {
'quarter': 'four',
'half': 'two',
'halve': 'two'
}
class NumberException(Exception):
def __init__(self, msg):
Exception.__init__(self, msg)
def parseFloat(self, words):
"""Convert a floating-point number described in words to a double.
Supports two kinds of descriptions: those with a 'point' (e.g.,
"one point two five") and those with a fraction (e.g., "one and
a quarter").
Args:
words (str): Description of the floating-point number.
Returns:
A double representation of the words.
"""
def pointFloat(words):
m = re.search(r'(.*) point (.*)', words)
if m:
whole = m.group(1)
frac = m.group(2)
total = 0.0
coeff = 0.10
for digit in frac.split(' '):
total += coeff * self.parse(digit)
coeff /= 10.0
return self.parseInt(whole) + total
return None
def fractionFloat(words):
m = re.search(r'(.*) and (.*)', words)
if m:
whole = self.parseInt(m.group(1))
frac = m.group(2)
# Replace plurals
frac = re.sub(r'(\w+)s(\b)', '\g<1>\g<2>', frac)
# Convert 'a' to 'one' (e.g., 'a third' to 'one third')
frac = re.sub(r'(\b)a(\b)', '\g<1>one\g<2>', frac)
split = frac.split(' ')
# Split fraction into num (regular integer), denom (ordinal)
num = split[:1]
denom = split[1:]
while denom:
try:
# Test for valid num, denom
num_value = self.parse(' '.join(num))
denom_value = self.parse(' '.join(denom))
return whole + float(num_value) / denom_value
except:
# Add another word to num
num += denom[:1]
denom = denom[1:]
return None
# Extract "one point two five"-type float
result = pointFloat(words)
if result:
return result
# Extract "one and a quarter"-type float
result = fractionFloat(words)
if result:
return result
# Parse as integer
return self.parseInt(words)
def parseInt(self, words):
"""Parses words to the integer they describe.
Args:
words (str): Description of the integer.
Returns:
An integer representation of the words.
"""
# Remove 'and', case-sensitivity
words = words.replace(" and ", " ").lower()
# 'a' -> 'one'
words = re.sub(r'(\b)a(\b)', '\g<1>one\g<2>', words)
def textToNumber(s):
"""
Converts raw number string to an integer.
Based on text2num.py by Greg Hewill.
"""
a = re.split(r"[\s-]+", s)
n = 0
g = 0
for w in a:
x = NumberService.__small__.get(w, None)
if x is not None:
g += x
elif w == "hundred":
g *= 100
else:
x = NumberService.__magnitude__.get(w, None)
if x is not None:
n += g * x
g = 0
else:
raise NumberService.NumberException(
"Unknown number: " + w)
return n + g
return textToNumber(words)
def isValid(self, inp):
try:
self.parse(inp)
return True
except:
return False
@staticmethod
def parseMagnitude(m):
"""Parses a number m into a human-ready string representation.
For example, crops off floats if they're too accurate.
Arguments:
m (float): Floating-point number to be cleaned.
Returns:
Human-ready string description of the number.
"""
m = NumberService().parse(m)
def toDecimalPrecision(n, k):
return float("%.*f" % (k, round(n, k)))
# Cast to two digits of precision
digits = 2
magnitude = toDecimalPrecision(m, digits)
# If value is really small, keep going
while not magnitude:
digits += 1
magnitude = toDecimalPrecision(m, digits)
# If item is less than one, go one beyond 'necessary' number of digits
if m < 1.0:
magnitude = toDecimalPrecision(m, digits + 1)
# Ignore decimal accuracy if irrelevant
if int(magnitude) == magnitude:
magnitude = int(magnitude)
# Adjust for scientific notation
magString = str(magnitude)
magString = re.sub(r'(\d)e-(\d+)',
'\g<1> times ten to the negative \g<2>', magString)
magString = re.sub(r'(\d)e\+(\d+)',
'\g<1> times ten to the \g<2>', magString)
magString = re.sub(r'-(\d+)', 'negative \g<1>', magString)
magString = re.sub(r'\b0(\d+)', '\g<1>', magString)
return magString
def longestNumber(self, inp):
"""Extracts the longest valid numerical description from a string.
Not guaranteed to return a result even if some valid numerical
description exists (i.e., method is not particularly advanced).
Args:
inp (str): An arbitrary string, hopefully containing a number.
Returns:
The number with the longest string description in input,
or None if not found.
"""
split = inp.split(' ')
# Assume just a single number
numStart = None
numEnd = None
for i, w in enumerate(split):
if self.isValid(w):
if numStart is None:
numStart = i
numEnd = i
else:
# Check for ordinal, which would signify end
w = re.sub(r'(\w+)s(\b)', '\g<1>\g<2>', w)
if w in self.__ordinals__:
if self.isValid(' '.join(split[numStart:i + 1])):
numEnd = i
break
description = ' '.join(split[numStart:numEnd + 1])
return self.parse(description)
|
crm416/semantic | semantic/numbers.py | NumberService.parseFloat | python | def parseFloat(self, words):
def pointFloat(words):
m = re.search(r'(.*) point (.*)', words)
if m:
whole = m.group(1)
frac = m.group(2)
total = 0.0
coeff = 0.10
for digit in frac.split(' '):
total += coeff * self.parse(digit)
coeff /= 10.0
return self.parseInt(whole) + total
return None
def fractionFloat(words):
m = re.search(r'(.*) and (.*)', words)
if m:
whole = self.parseInt(m.group(1))
frac = m.group(2)
# Replace plurals
frac = re.sub(r'(\w+)s(\b)', '\g<1>\g<2>', frac)
# Convert 'a' to 'one' (e.g., 'a third' to 'one third')
frac = re.sub(r'(\b)a(\b)', '\g<1>one\g<2>', frac)
split = frac.split(' ')
# Split fraction into num (regular integer), denom (ordinal)
num = split[:1]
denom = split[1:]
while denom:
try:
# Test for valid num, denom
num_value = self.parse(' '.join(num))
denom_value = self.parse(' '.join(denom))
return whole + float(num_value) / denom_value
except:
# Add another word to num
num += denom[:1]
denom = denom[1:]
return None
# Extract "one point two five"-type float
result = pointFloat(words)
if result:
return result
# Extract "one and a quarter"-type float
result = fractionFloat(words)
if result:
return result
# Parse as integer
return self.parseInt(words) | Convert a floating-point number described in words to a double.
Supports two kinds of descriptions: those with a 'point' (e.g.,
"one point two five") and those with a fraction (e.g., "one and
a quarter").
Args:
words (str): Description of the floating-point number.
Returns:
A double representation of the words. | train | https://github.com/crm416/semantic/blob/46deb8fefb3ea58aad2fedc8d0d62f3ee254b8fe/semantic/numbers.py#L124-L192 | [
"def parseInt(self, words):\n \"\"\"Parses words to the integer they describe.\n\n Args:\n words (str): Description of the integer.\n\n Returns:\n An integer representation of the words.\n \"\"\"\n # Remove 'and', case-sensitivity\n words = words.replace(\" and \", \" \").lower()\n # 'a' -> 'one'\n words = re.sub(r'(\\b)a(\\b)', '\\g<1>one\\g<2>', words)\n\n def textToNumber(s):\n \"\"\"\n Converts raw number string to an integer.\n Based on text2num.py by Greg Hewill.\n \"\"\"\n a = re.split(r\"[\\s-]+\", s)\n n = 0\n g = 0\n for w in a:\n x = NumberService.__small__.get(w, None)\n if x is not None:\n g += x\n elif w == \"hundred\":\n g *= 100\n else:\n x = NumberService.__magnitude__.get(w, None)\n if x is not None:\n n += g * x\n g = 0\n else:\n raise NumberService.NumberException(\n \"Unknown number: \" + w)\n return n + g\n\n return textToNumber(words)\n",
"def pointFloat(words):\n m = re.search(r'(.*) point (.*)', words)\n if m:\n whole = m.group(1)\n frac = m.group(2)\n total = 0.0\n coeff = 0.10\n for digit in frac.split(' '):\n total += coeff * self.parse(digit)\n coeff /= 10.0\n\n return self.parseInt(whole) + total\n return None\n",
"def fractionFloat(words):\n m = re.search(r'(.*) and (.*)', words)\n if m:\n whole = self.parseInt(m.group(1))\n frac = m.group(2)\n\n # Replace plurals\n frac = re.sub(r'(\\w+)s(\\b)', '\\g<1>\\g<2>', frac)\n\n # Convert 'a' to 'one' (e.g., 'a third' to 'one third')\n frac = re.sub(r'(\\b)a(\\b)', '\\g<1>one\\g<2>', frac)\n\n split = frac.split(' ')\n\n # Split fraction into num (regular integer), denom (ordinal)\n num = split[:1]\n denom = split[1:]\n\n while denom:\n try:\n # Test for valid num, denom\n num_value = self.parse(' '.join(num))\n denom_value = self.parse(' '.join(denom))\n return whole + float(num_value) / denom_value\n except:\n # Add another word to num\n num += denom[:1]\n denom = denom[1:]\n return None\n"
] | class NumberService(object):
__small__ = {
'zero': 0,
'one': 1,
'two': 2,
'three': 3,
'four': 4,
'five': 5,
'six': 6,
'seven': 7,
'eight': 8,
'nine': 9,
'ten': 10,
'eleven': 11,
'twelve': 12,
'thirteen': 13,
'fourteen': 14,
'fifteen': 15,
'sixteen': 16,
'seventeen': 17,
'eighteen': 18,
'nineteen': 19,
'twenty': 20,
'thirty': 30,
'forty': 40,
'fifty': 50,
'sixty': 60,
'seventy': 70,
'eighty': 80,
'ninety': 90
}
__magnitude__ = {
'thousand': 1000,
'million': 1000000,
'billion': 1000000000,
'trillion': 1000000000000,
'quadrillion': 1000000000000000,
'quintillion': 1000000000000000000,
'sextillion': 1000000000000000000000,
'septillion': 1000000000000000000000000,
'octillion': 1000000000000000000000000000,
'nonillion': 1000000000000000000000000000000,
'decillion': 1000000000000000000000000000000000,
}
__ordinals__ = {
'first': 'one',
'second': 'two',
'third': 'three',
'fourth': 'four',
'fifth': 'five',
'sixth': 'six',
'seventh': 'seventh',
'eighth': 'eight',
'ninth': 'nine',
'tenth': 'ten',
'eleventh': 'eleven',
'twelth': 'twelve',
'thirteenth': 'thirteen',
'fifteenth': 'fifteen',
'sixteenth': 'sixteen',
'seventeenth': 'seventeen',
'eighteenth': 'eighteen',
'nineteenth': 'nineteen',
'twentieth': 'twenty',
'thirtieth': 'thirty',
'fortieth': 'forty',
'fiftieth': 'fifty',
'sixtieth': 'sixty',
'seventieth': 'seventy',
'eightieth': 'eighty',
'ninetieth': 'ninety',
'hundredth': 'hundred'
}
__fractions__ = {
'quarter': 'four',
'half': 'two',
'halve': 'two'
}
class NumberException(Exception):
def __init__(self, msg):
Exception.__init__(self, msg)
def parse(self, words):
"""A general method for parsing word-representations of numbers.
Supports floats and integers.
Args:
words (str): Description of an arbitrary number.
Returns:
A double representation of the words.
"""
def exact(words):
"""If already represented as float or int, convert."""
try:
return float(words)
except:
return None
guess = exact(words)
if guess is not None:
return guess
split = words.split(' ')
# Replace final ordinal/fraction with number
if split[-1] in self.__fractions__:
split[-1] = self.__fractions__[split[-1]]
elif split[-1] in self.__ordinals__:
split[-1] = self.__ordinals__[split[-1]]
parsed_ordinals = ' '.join(split)
return self.parseFloat(parsed_ordinals)
def parseInt(self, words):
"""Parses words to the integer they describe.
Args:
words (str): Description of the integer.
Returns:
An integer representation of the words.
"""
# Remove 'and', case-sensitivity
words = words.replace(" and ", " ").lower()
# 'a' -> 'one'
words = re.sub(r'(\b)a(\b)', '\g<1>one\g<2>', words)
def textToNumber(s):
"""
Converts raw number string to an integer.
Based on text2num.py by Greg Hewill.
"""
a = re.split(r"[\s-]+", s)
n = 0
g = 0
for w in a:
x = NumberService.__small__.get(w, None)
if x is not None:
g += x
elif w == "hundred":
g *= 100
else:
x = NumberService.__magnitude__.get(w, None)
if x is not None:
n += g * x
g = 0
else:
raise NumberService.NumberException(
"Unknown number: " + w)
return n + g
return textToNumber(words)
def isValid(self, inp):
try:
self.parse(inp)
return True
except:
return False
@staticmethod
def parseMagnitude(m):
"""Parses a number m into a human-ready string representation.
For example, crops off floats if they're too accurate.
Arguments:
m (float): Floating-point number to be cleaned.
Returns:
Human-ready string description of the number.
"""
m = NumberService().parse(m)
def toDecimalPrecision(n, k):
return float("%.*f" % (k, round(n, k)))
# Cast to two digits of precision
digits = 2
magnitude = toDecimalPrecision(m, digits)
# If value is really small, keep going
while not magnitude:
digits += 1
magnitude = toDecimalPrecision(m, digits)
# If item is less than one, go one beyond 'necessary' number of digits
if m < 1.0:
magnitude = toDecimalPrecision(m, digits + 1)
# Ignore decimal accuracy if irrelevant
if int(magnitude) == magnitude:
magnitude = int(magnitude)
# Adjust for scientific notation
magString = str(magnitude)
magString = re.sub(r'(\d)e-(\d+)',
'\g<1> times ten to the negative \g<2>', magString)
magString = re.sub(r'(\d)e\+(\d+)',
'\g<1> times ten to the \g<2>', magString)
magString = re.sub(r'-(\d+)', 'negative \g<1>', magString)
magString = re.sub(r'\b0(\d+)', '\g<1>', magString)
return magString
def longestNumber(self, inp):
"""Extracts the longest valid numerical description from a string.
Not guaranteed to return a result even if some valid numerical
description exists (i.e., method is not particularly advanced).
Args:
inp (str): An arbitrary string, hopefully containing a number.
Returns:
The number with the longest string description in input,
or None if not found.
"""
split = inp.split(' ')
# Assume just a single number
numStart = None
numEnd = None
for i, w in enumerate(split):
if self.isValid(w):
if numStart is None:
numStart = i
numEnd = i
else:
# Check for ordinal, which would signify end
w = re.sub(r'(\w+)s(\b)', '\g<1>\g<2>', w)
if w in self.__ordinals__:
if self.isValid(' '.join(split[numStart:i + 1])):
numEnd = i
break
description = ' '.join(split[numStart:numEnd + 1])
return self.parse(description)
|
crm416/semantic | semantic/numbers.py | NumberService.parseInt | python | def parseInt(self, words):
# Remove 'and', case-sensitivity
words = words.replace(" and ", " ").lower()
# 'a' -> 'one'
words = re.sub(r'(\b)a(\b)', '\g<1>one\g<2>', words)
def textToNumber(s):
"""
Converts raw number string to an integer.
Based on text2num.py by Greg Hewill.
"""
a = re.split(r"[\s-]+", s)
n = 0
g = 0
for w in a:
x = NumberService.__small__.get(w, None)
if x is not None:
g += x
elif w == "hundred":
g *= 100
else:
x = NumberService.__magnitude__.get(w, None)
if x is not None:
n += g * x
g = 0
else:
raise NumberService.NumberException(
"Unknown number: " + w)
return n + g
return textToNumber(words) | Parses words to the integer they describe.
Args:
words (str): Description of the integer.
Returns:
An integer representation of the words. | train | https://github.com/crm416/semantic/blob/46deb8fefb3ea58aad2fedc8d0d62f3ee254b8fe/semantic/numbers.py#L194-L232 | [
"def textToNumber(s):\n \"\"\"\n Converts raw number string to an integer.\n Based on text2num.py by Greg Hewill.\n \"\"\"\n a = re.split(r\"[\\s-]+\", s)\n n = 0\n g = 0\n for w in a:\n x = NumberService.__small__.get(w, None)\n if x is not None:\n g += x\n elif w == \"hundred\":\n g *= 100\n else:\n x = NumberService.__magnitude__.get(w, None)\n if x is not None:\n n += g * x\n g = 0\n else:\n raise NumberService.NumberException(\n \"Unknown number: \" + w)\n return n + g\n"
] | class NumberService(object):
__small__ = {
'zero': 0,
'one': 1,
'two': 2,
'three': 3,
'four': 4,
'five': 5,
'six': 6,
'seven': 7,
'eight': 8,
'nine': 9,
'ten': 10,
'eleven': 11,
'twelve': 12,
'thirteen': 13,
'fourteen': 14,
'fifteen': 15,
'sixteen': 16,
'seventeen': 17,
'eighteen': 18,
'nineteen': 19,
'twenty': 20,
'thirty': 30,
'forty': 40,
'fifty': 50,
'sixty': 60,
'seventy': 70,
'eighty': 80,
'ninety': 90
}
__magnitude__ = {
'thousand': 1000,
'million': 1000000,
'billion': 1000000000,
'trillion': 1000000000000,
'quadrillion': 1000000000000000,
'quintillion': 1000000000000000000,
'sextillion': 1000000000000000000000,
'septillion': 1000000000000000000000000,
'octillion': 1000000000000000000000000000,
'nonillion': 1000000000000000000000000000000,
'decillion': 1000000000000000000000000000000000,
}
__ordinals__ = {
'first': 'one',
'second': 'two',
'third': 'three',
'fourth': 'four',
'fifth': 'five',
'sixth': 'six',
'seventh': 'seventh',
'eighth': 'eight',
'ninth': 'nine',
'tenth': 'ten',
'eleventh': 'eleven',
'twelth': 'twelve',
'thirteenth': 'thirteen',
'fifteenth': 'fifteen',
'sixteenth': 'sixteen',
'seventeenth': 'seventeen',
'eighteenth': 'eighteen',
'nineteenth': 'nineteen',
'twentieth': 'twenty',
'thirtieth': 'thirty',
'fortieth': 'forty',
'fiftieth': 'fifty',
'sixtieth': 'sixty',
'seventieth': 'seventy',
'eightieth': 'eighty',
'ninetieth': 'ninety',
'hundredth': 'hundred'
}
__fractions__ = {
'quarter': 'four',
'half': 'two',
'halve': 'two'
}
class NumberException(Exception):
def __init__(self, msg):
Exception.__init__(self, msg)
def parse(self, words):
"""A general method for parsing word-representations of numbers.
Supports floats and integers.
Args:
words (str): Description of an arbitrary number.
Returns:
A double representation of the words.
"""
def exact(words):
"""If already represented as float or int, convert."""
try:
return float(words)
except:
return None
guess = exact(words)
if guess is not None:
return guess
split = words.split(' ')
# Replace final ordinal/fraction with number
if split[-1] in self.__fractions__:
split[-1] = self.__fractions__[split[-1]]
elif split[-1] in self.__ordinals__:
split[-1] = self.__ordinals__[split[-1]]
parsed_ordinals = ' '.join(split)
return self.parseFloat(parsed_ordinals)
def parseFloat(self, words):
"""Convert a floating-point number described in words to a double.
Supports two kinds of descriptions: those with a 'point' (e.g.,
"one point two five") and those with a fraction (e.g., "one and
a quarter").
Args:
words (str): Description of the floating-point number.
Returns:
A double representation of the words.
"""
def pointFloat(words):
m = re.search(r'(.*) point (.*)', words)
if m:
whole = m.group(1)
frac = m.group(2)
total = 0.0
coeff = 0.10
for digit in frac.split(' '):
total += coeff * self.parse(digit)
coeff /= 10.0
return self.parseInt(whole) + total
return None
def fractionFloat(words):
m = re.search(r'(.*) and (.*)', words)
if m:
whole = self.parseInt(m.group(1))
frac = m.group(2)
# Replace plurals
frac = re.sub(r'(\w+)s(\b)', '\g<1>\g<2>', frac)
# Convert 'a' to 'one' (e.g., 'a third' to 'one third')
frac = re.sub(r'(\b)a(\b)', '\g<1>one\g<2>', frac)
split = frac.split(' ')
# Split fraction into num (regular integer), denom (ordinal)
num = split[:1]
denom = split[1:]
while denom:
try:
# Test for valid num, denom
num_value = self.parse(' '.join(num))
denom_value = self.parse(' '.join(denom))
return whole + float(num_value) / denom_value
except:
# Add another word to num
num += denom[:1]
denom = denom[1:]
return None
# Extract "one point two five"-type float
result = pointFloat(words)
if result:
return result
# Extract "one and a quarter"-type float
result = fractionFloat(words)
if result:
return result
# Parse as integer
return self.parseInt(words)
def isValid(self, inp):
try:
self.parse(inp)
return True
except:
return False
@staticmethod
def parseMagnitude(m):
"""Parses a number m into a human-ready string representation.
For example, crops off floats if they're too accurate.
Arguments:
m (float): Floating-point number to be cleaned.
Returns:
Human-ready string description of the number.
"""
m = NumberService().parse(m)
def toDecimalPrecision(n, k):
return float("%.*f" % (k, round(n, k)))
# Cast to two digits of precision
digits = 2
magnitude = toDecimalPrecision(m, digits)
# If value is really small, keep going
while not magnitude:
digits += 1
magnitude = toDecimalPrecision(m, digits)
# If item is less than one, go one beyond 'necessary' number of digits
if m < 1.0:
magnitude = toDecimalPrecision(m, digits + 1)
# Ignore decimal accuracy if irrelevant
if int(magnitude) == magnitude:
magnitude = int(magnitude)
# Adjust for scientific notation
magString = str(magnitude)
magString = re.sub(r'(\d)e-(\d+)',
'\g<1> times ten to the negative \g<2>', magString)
magString = re.sub(r'(\d)e\+(\d+)',
'\g<1> times ten to the \g<2>', magString)
magString = re.sub(r'-(\d+)', 'negative \g<1>', magString)
magString = re.sub(r'\b0(\d+)', '\g<1>', magString)
return magString
def longestNumber(self, inp):
"""Extracts the longest valid numerical description from a string.
Not guaranteed to return a result even if some valid numerical
description exists (i.e., method is not particularly advanced).
Args:
inp (str): An arbitrary string, hopefully containing a number.
Returns:
The number with the longest string description in input,
or None if not found.
"""
split = inp.split(' ')
# Assume just a single number
numStart = None
numEnd = None
for i, w in enumerate(split):
if self.isValid(w):
if numStart is None:
numStart = i
numEnd = i
else:
# Check for ordinal, which would signify end
w = re.sub(r'(\w+)s(\b)', '\g<1>\g<2>', w)
if w in self.__ordinals__:
if self.isValid(' '.join(split[numStart:i + 1])):
numEnd = i
break
description = ' '.join(split[numStart:numEnd + 1])
return self.parse(description)
|
crm416/semantic | semantic/numbers.py | NumberService.parseMagnitude | python | def parseMagnitude(m):
m = NumberService().parse(m)
def toDecimalPrecision(n, k):
return float("%.*f" % (k, round(n, k)))
# Cast to two digits of precision
digits = 2
magnitude = toDecimalPrecision(m, digits)
# If value is really small, keep going
while not magnitude:
digits += 1
magnitude = toDecimalPrecision(m, digits)
# If item is less than one, go one beyond 'necessary' number of digits
if m < 1.0:
magnitude = toDecimalPrecision(m, digits + 1)
# Ignore decimal accuracy if irrelevant
if int(magnitude) == magnitude:
magnitude = int(magnitude)
# Adjust for scientific notation
magString = str(magnitude)
magString = re.sub(r'(\d)e-(\d+)',
'\g<1> times ten to the negative \g<2>', magString)
magString = re.sub(r'(\d)e\+(\d+)',
'\g<1> times ten to the \g<2>', magString)
magString = re.sub(r'-(\d+)', 'negative \g<1>', magString)
magString = re.sub(r'\b0(\d+)', '\g<1>', magString)
return magString | Parses a number m into a human-ready string representation.
For example, crops off floats if they're too accurate.
Arguments:
m (float): Floating-point number to be cleaned.
Returns:
Human-ready string description of the number. | train | https://github.com/crm416/semantic/blob/46deb8fefb3ea58aad2fedc8d0d62f3ee254b8fe/semantic/numbers.py#L242-L282 | [
"def parse(self, words):\n \"\"\"A general method for parsing word-representations of numbers.\n Supports floats and integers.\n\n Args:\n words (str): Description of an arbitrary number.\n\n Returns:\n A double representation of the words.\n \"\"\"\n def exact(words):\n \"\"\"If already represented as float or int, convert.\"\"\"\n try:\n return float(words)\n except:\n return None\n\n guess = exact(words)\n if guess is not None:\n return guess\n\n split = words.split(' ')\n\n # Replace final ordinal/fraction with number\n if split[-1] in self.__fractions__:\n split[-1] = self.__fractions__[split[-1]]\n elif split[-1] in self.__ordinals__:\n split[-1] = self.__ordinals__[split[-1]]\n\n parsed_ordinals = ' '.join(split)\n\n return self.parseFloat(parsed_ordinals)\n",
"def toDecimalPrecision(n, k):\n return float(\"%.*f\" % (k, round(n, k)))\n"
] | class NumberService(object):
__small__ = {
'zero': 0,
'one': 1,
'two': 2,
'three': 3,
'four': 4,
'five': 5,
'six': 6,
'seven': 7,
'eight': 8,
'nine': 9,
'ten': 10,
'eleven': 11,
'twelve': 12,
'thirteen': 13,
'fourteen': 14,
'fifteen': 15,
'sixteen': 16,
'seventeen': 17,
'eighteen': 18,
'nineteen': 19,
'twenty': 20,
'thirty': 30,
'forty': 40,
'fifty': 50,
'sixty': 60,
'seventy': 70,
'eighty': 80,
'ninety': 90
}
__magnitude__ = {
'thousand': 1000,
'million': 1000000,
'billion': 1000000000,
'trillion': 1000000000000,
'quadrillion': 1000000000000000,
'quintillion': 1000000000000000000,
'sextillion': 1000000000000000000000,
'septillion': 1000000000000000000000000,
'octillion': 1000000000000000000000000000,
'nonillion': 1000000000000000000000000000000,
'decillion': 1000000000000000000000000000000000,
}
__ordinals__ = {
'first': 'one',
'second': 'two',
'third': 'three',
'fourth': 'four',
'fifth': 'five',
'sixth': 'six',
'seventh': 'seventh',
'eighth': 'eight',
'ninth': 'nine',
'tenth': 'ten',
'eleventh': 'eleven',
'twelth': 'twelve',
'thirteenth': 'thirteen',
'fifteenth': 'fifteen',
'sixteenth': 'sixteen',
'seventeenth': 'seventeen',
'eighteenth': 'eighteen',
'nineteenth': 'nineteen',
'twentieth': 'twenty',
'thirtieth': 'thirty',
'fortieth': 'forty',
'fiftieth': 'fifty',
'sixtieth': 'sixty',
'seventieth': 'seventy',
'eightieth': 'eighty',
'ninetieth': 'ninety',
'hundredth': 'hundred'
}
__fractions__ = {
'quarter': 'four',
'half': 'two',
'halve': 'two'
}
class NumberException(Exception):
def __init__(self, msg):
Exception.__init__(self, msg)
def parse(self, words):
"""A general method for parsing word-representations of numbers.
Supports floats and integers.
Args:
words (str): Description of an arbitrary number.
Returns:
A double representation of the words.
"""
def exact(words):
"""If already represented as float or int, convert."""
try:
return float(words)
except:
return None
guess = exact(words)
if guess is not None:
return guess
split = words.split(' ')
# Replace final ordinal/fraction with number
if split[-1] in self.__fractions__:
split[-1] = self.__fractions__[split[-1]]
elif split[-1] in self.__ordinals__:
split[-1] = self.__ordinals__[split[-1]]
parsed_ordinals = ' '.join(split)
return self.parseFloat(parsed_ordinals)
def parseFloat(self, words):
"""Convert a floating-point number described in words to a double.
Supports two kinds of descriptions: those with a 'point' (e.g.,
"one point two five") and those with a fraction (e.g., "one and
a quarter").
Args:
words (str): Description of the floating-point number.
Returns:
A double representation of the words.
"""
def pointFloat(words):
m = re.search(r'(.*) point (.*)', words)
if m:
whole = m.group(1)
frac = m.group(2)
total = 0.0
coeff = 0.10
for digit in frac.split(' '):
total += coeff * self.parse(digit)
coeff /= 10.0
return self.parseInt(whole) + total
return None
def fractionFloat(words):
m = re.search(r'(.*) and (.*)', words)
if m:
whole = self.parseInt(m.group(1))
frac = m.group(2)
# Replace plurals
frac = re.sub(r'(\w+)s(\b)', '\g<1>\g<2>', frac)
# Convert 'a' to 'one' (e.g., 'a third' to 'one third')
frac = re.sub(r'(\b)a(\b)', '\g<1>one\g<2>', frac)
split = frac.split(' ')
# Split fraction into num (regular integer), denom (ordinal)
num = split[:1]
denom = split[1:]
while denom:
try:
# Test for valid num, denom
num_value = self.parse(' '.join(num))
denom_value = self.parse(' '.join(denom))
return whole + float(num_value) / denom_value
except:
# Add another word to num
num += denom[:1]
denom = denom[1:]
return None
# Extract "one point two five"-type float
result = pointFloat(words)
if result:
return result
# Extract "one and a quarter"-type float
result = fractionFloat(words)
if result:
return result
# Parse as integer
return self.parseInt(words)
def parseInt(self, words):
"""Parses words to the integer they describe.
Args:
words (str): Description of the integer.
Returns:
An integer representation of the words.
"""
# Remove 'and', case-sensitivity
words = words.replace(" and ", " ").lower()
# 'a' -> 'one'
words = re.sub(r'(\b)a(\b)', '\g<1>one\g<2>', words)
def textToNumber(s):
"""
Converts raw number string to an integer.
Based on text2num.py by Greg Hewill.
"""
a = re.split(r"[\s-]+", s)
n = 0
g = 0
for w in a:
x = NumberService.__small__.get(w, None)
if x is not None:
g += x
elif w == "hundred":
g *= 100
else:
x = NumberService.__magnitude__.get(w, None)
if x is not None:
n += g * x
g = 0
else:
raise NumberService.NumberException(
"Unknown number: " + w)
return n + g
return textToNumber(words)
def isValid(self, inp):
try:
self.parse(inp)
return True
except:
return False
@staticmethod
def longestNumber(self, inp):
"""Extracts the longest valid numerical description from a string.
Not guaranteed to return a result even if some valid numerical
description exists (i.e., method is not particularly advanced).
Args:
inp (str): An arbitrary string, hopefully containing a number.
Returns:
The number with the longest string description in input,
or None if not found.
"""
split = inp.split(' ')
# Assume just a single number
numStart = None
numEnd = None
for i, w in enumerate(split):
if self.isValid(w):
if numStart is None:
numStart = i
numEnd = i
else:
# Check for ordinal, which would signify end
w = re.sub(r'(\w+)s(\b)', '\g<1>\g<2>', w)
if w in self.__ordinals__:
if self.isValid(' '.join(split[numStart:i + 1])):
numEnd = i
break
description = ' '.join(split[numStart:numEnd + 1])
return self.parse(description)
|
crm416/semantic | semantic/numbers.py | NumberService.longestNumber | python | def longestNumber(self, inp):
split = inp.split(' ')
# Assume just a single number
numStart = None
numEnd = None
for i, w in enumerate(split):
if self.isValid(w):
if numStart is None:
numStart = i
numEnd = i
else:
# Check for ordinal, which would signify end
w = re.sub(r'(\w+)s(\b)', '\g<1>\g<2>', w)
if w in self.__ordinals__:
if self.isValid(' '.join(split[numStart:i + 1])):
numEnd = i
break
description = ' '.join(split[numStart:numEnd + 1])
return self.parse(description) | Extracts the longest valid numerical description from a string.
Not guaranteed to return a result even if some valid numerical
description exists (i.e., method is not particularly advanced).
Args:
inp (str): An arbitrary string, hopefully containing a number.
Returns:
The number with the longest string description in input,
or None if not found. | train | https://github.com/crm416/semantic/blob/46deb8fefb3ea58aad2fedc8d0d62f3ee254b8fe/semantic/numbers.py#L284-L314 | [
"def parse(self, words):\n \"\"\"A general method for parsing word-representations of numbers.\n Supports floats and integers.\n\n Args:\n words (str): Description of an arbitrary number.\n\n Returns:\n A double representation of the words.\n \"\"\"\n def exact(words):\n \"\"\"If already represented as float or int, convert.\"\"\"\n try:\n return float(words)\n except:\n return None\n\n guess = exact(words)\n if guess is not None:\n return guess\n\n split = words.split(' ')\n\n # Replace final ordinal/fraction with number\n if split[-1] in self.__fractions__:\n split[-1] = self.__fractions__[split[-1]]\n elif split[-1] in self.__ordinals__:\n split[-1] = self.__ordinals__[split[-1]]\n\n parsed_ordinals = ' '.join(split)\n\n return self.parseFloat(parsed_ordinals)\n",
"def isValid(self, inp):\n try:\n self.parse(inp)\n return True\n except:\n return False\n"
] | class NumberService(object):
__small__ = {
'zero': 0,
'one': 1,
'two': 2,
'three': 3,
'four': 4,
'five': 5,
'six': 6,
'seven': 7,
'eight': 8,
'nine': 9,
'ten': 10,
'eleven': 11,
'twelve': 12,
'thirteen': 13,
'fourteen': 14,
'fifteen': 15,
'sixteen': 16,
'seventeen': 17,
'eighteen': 18,
'nineteen': 19,
'twenty': 20,
'thirty': 30,
'forty': 40,
'fifty': 50,
'sixty': 60,
'seventy': 70,
'eighty': 80,
'ninety': 90
}
__magnitude__ = {
'thousand': 1000,
'million': 1000000,
'billion': 1000000000,
'trillion': 1000000000000,
'quadrillion': 1000000000000000,
'quintillion': 1000000000000000000,
'sextillion': 1000000000000000000000,
'septillion': 1000000000000000000000000,
'octillion': 1000000000000000000000000000,
'nonillion': 1000000000000000000000000000000,
'decillion': 1000000000000000000000000000000000,
}
__ordinals__ = {
'first': 'one',
'second': 'two',
'third': 'three',
'fourth': 'four',
'fifth': 'five',
'sixth': 'six',
'seventh': 'seventh',
'eighth': 'eight',
'ninth': 'nine',
'tenth': 'ten',
'eleventh': 'eleven',
'twelth': 'twelve',
'thirteenth': 'thirteen',
'fifteenth': 'fifteen',
'sixteenth': 'sixteen',
'seventeenth': 'seventeen',
'eighteenth': 'eighteen',
'nineteenth': 'nineteen',
'twentieth': 'twenty',
'thirtieth': 'thirty',
'fortieth': 'forty',
'fiftieth': 'fifty',
'sixtieth': 'sixty',
'seventieth': 'seventy',
'eightieth': 'eighty',
'ninetieth': 'ninety',
'hundredth': 'hundred'
}
__fractions__ = {
'quarter': 'four',
'half': 'two',
'halve': 'two'
}
class NumberException(Exception):
def __init__(self, msg):
Exception.__init__(self, msg)
def parse(self, words):
"""A general method for parsing word-representations of numbers.
Supports floats and integers.
Args:
words (str): Description of an arbitrary number.
Returns:
A double representation of the words.
"""
def exact(words):
"""If already represented as float or int, convert."""
try:
return float(words)
except:
return None
guess = exact(words)
if guess is not None:
return guess
split = words.split(' ')
# Replace final ordinal/fraction with number
if split[-1] in self.__fractions__:
split[-1] = self.__fractions__[split[-1]]
elif split[-1] in self.__ordinals__:
split[-1] = self.__ordinals__[split[-1]]
parsed_ordinals = ' '.join(split)
return self.parseFloat(parsed_ordinals)
def parseFloat(self, words):
"""Convert a floating-point number described in words to a double.
Supports two kinds of descriptions: those with a 'point' (e.g.,
"one point two five") and those with a fraction (e.g., "one and
a quarter").
Args:
words (str): Description of the floating-point number.
Returns:
A double representation of the words.
"""
def pointFloat(words):
m = re.search(r'(.*) point (.*)', words)
if m:
whole = m.group(1)
frac = m.group(2)
total = 0.0
coeff = 0.10
for digit in frac.split(' '):
total += coeff * self.parse(digit)
coeff /= 10.0
return self.parseInt(whole) + total
return None
def fractionFloat(words):
m = re.search(r'(.*) and (.*)', words)
if m:
whole = self.parseInt(m.group(1))
frac = m.group(2)
# Replace plurals
frac = re.sub(r'(\w+)s(\b)', '\g<1>\g<2>', frac)
# Convert 'a' to 'one' (e.g., 'a third' to 'one third')
frac = re.sub(r'(\b)a(\b)', '\g<1>one\g<2>', frac)
split = frac.split(' ')
# Split fraction into num (regular integer), denom (ordinal)
num = split[:1]
denom = split[1:]
while denom:
try:
# Test for valid num, denom
num_value = self.parse(' '.join(num))
denom_value = self.parse(' '.join(denom))
return whole + float(num_value) / denom_value
except:
# Add another word to num
num += denom[:1]
denom = denom[1:]
return None
# Extract "one point two five"-type float
result = pointFloat(words)
if result:
return result
# Extract "one and a quarter"-type float
result = fractionFloat(words)
if result:
return result
# Parse as integer
return self.parseInt(words)
def parseInt(self, words):
"""Parses words to the integer they describe.
Args:
words (str): Description of the integer.
Returns:
An integer representation of the words.
"""
# Remove 'and', case-sensitivity
words = words.replace(" and ", " ").lower()
# 'a' -> 'one'
words = re.sub(r'(\b)a(\b)', '\g<1>one\g<2>', words)
def textToNumber(s):
"""
Converts raw number string to an integer.
Based on text2num.py by Greg Hewill.
"""
a = re.split(r"[\s-]+", s)
n = 0
g = 0
for w in a:
x = NumberService.__small__.get(w, None)
if x is not None:
g += x
elif w == "hundred":
g *= 100
else:
x = NumberService.__magnitude__.get(w, None)
if x is not None:
n += g * x
g = 0
else:
raise NumberService.NumberException(
"Unknown number: " + w)
return n + g
return textToNumber(words)
def isValid(self, inp):
try:
self.parse(inp)
return True
except:
return False
@staticmethod
def parseMagnitude(m):
"""Parses a number m into a human-ready string representation.
For example, crops off floats if they're too accurate.
Arguments:
m (float): Floating-point number to be cleaned.
Returns:
Human-ready string description of the number.
"""
m = NumberService().parse(m)
def toDecimalPrecision(n, k):
return float("%.*f" % (k, round(n, k)))
# Cast to two digits of precision
digits = 2
magnitude = toDecimalPrecision(m, digits)
# If value is really small, keep going
while not magnitude:
digits += 1
magnitude = toDecimalPrecision(m, digits)
# If item is less than one, go one beyond 'necessary' number of digits
if m < 1.0:
magnitude = toDecimalPrecision(m, digits + 1)
# Ignore decimal accuracy if irrelevant
if int(magnitude) == magnitude:
magnitude = int(magnitude)
# Adjust for scientific notation
magString = str(magnitude)
magString = re.sub(r'(\d)e-(\d+)',
'\g<1> times ten to the negative \g<2>', magString)
magString = re.sub(r'(\d)e\+(\d+)',
'\g<1> times ten to the \g<2>', magString)
magString = re.sub(r'-(\d+)', 'negative \g<1>', magString)
magString = re.sub(r'\b0(\d+)', '\g<1>', magString)
return magString
|
crm416/semantic | semantic/dates.py | extractDates | python | def extractDates(inp, tz=None, now=None):
service = DateService(tz=tz, now=now)
return service.extractDates(inp) | Extract semantic date information from an input string.
This is a convenience method which would only be used if
you'd rather not initialize a DateService object.
Args:
inp (str): The input string to be parsed.
tz: An optional Pytz timezone. All datetime objects returned will
be relative to the supplied timezone, or timezone-less if none
is supplied.
now: The time to which all returned datetime objects should be
relative. For example, if the text is "In 5 hours", the
datetime returned will be now + datetime.timedelta(hours=5).
Uses datetime.datetime.now() if none is supplied.
Returns:
A list of datetime objects extracted from input. | train | https://github.com/crm416/semantic/blob/46deb8fefb3ea58aad2fedc8d0d62f3ee254b8fe/semantic/dates.py#L476-L495 | [
"def extractDates(self, inp):\n \"\"\"Extract semantic date information from an input string.\n In effect, runs both parseDay and parseTime on the input\n string and merges the results to produce a comprehensive\n datetime object.\n\n Args:\n inp (str): Input string to be parsed.\n\n Returns:\n A list of datetime objects containing the extracted dates from the\n input snippet, or an empty list if not found.\n \"\"\"\n def merge(param):\n day, time = param\n if not (day or time):\n return None\n\n if not day:\n return time\n if not time:\n return day\n\n return datetime.datetime(\n day.year, day.month, day.day, time.hour, time.minute\n )\n\n days = self.extractDays(inp)\n times = self.extractTimes(inp)\n return map(merge, zip_longest(days, times, fillvalue=None))\n"
] | import re
import datetime
try:
from itertools import zip_longest
except:
from itertools import izip_longest as zip_longest
from .numbers import NumberService
class DateService(object):
"""Initialize a DateService for extracting dates from text.
Args:
tz: An optional Pytz timezone. All datetime objects returned will
be relative to the supplied timezone, or timezone-less if none
is supplied.
now: The time to which all returned datetime objects should be
relative. For example, if the text is "In 5 hours", the
datetime returned will be now + datetime.timedelta(hours=5).
Uses datetime.datetime.now() if none is supplied.
Returns:
A DateService which uses tz and now for all of its computations.
"""
def __init__(self, tz=None, now=None):
self.tz = tz
if now:
self.now = now
else:
self.now = datetime.datetime.now(tz=self.tz)
__months__ = ['january', 'february', 'march', 'april', 'may', 'june',
'july', 'august', 'september', 'october', 'november',
'december']
__shortMonths__ = ['jan', 'feb', 'mar', 'apr', 'may',
'jun', 'jul', 'aug', 'sept', 'oct', 'nov', 'dec']
__daysOfWeek__ = ['monday', 'tuesday', 'wednesday',
'thursday', 'friday', 'saturday', 'sunday']
__relativeDates__ = ['tomorrow', 'tonight', 'next']
__todayMatches__ = ['tonight', 'today', 'this morning',
'this evening', 'this afternoon']
__tomorrowMatches__ = ['tomorrow', 'next morning',
'next evening', 'next afternoon']
__dateDescriptors__ = {
'one': 1,
'first': 1,
'two': 2,
'second': 2,
'three': 3,
'third': 3,
'four': 4,
'fourth': 4,
'five': 5,
'fifth': 5,
'six': 6,
'sixth': 6,
'seven': 7,
'seventh': 7,
'eight': 8,
'eighth': 8,
'nine': 9,
'ninth': 9,
'ten': 10,
'tenth': 10,
'eleven': 11,
'eleventh': 11,
'twelve': 12,
'twelth': 12,
'thirteen': 13,
'thirteenth': 13,
'fourteen': 14,
'fourteenth': 14,
'fifteen': 15,
'fifteenth': 15,
'sixteen': 16,
'sixteenth': 16,
'seventeen': 17,
'seventeenth': 17,
'eighteen': 18,
'eighteenth': 18,
'nineteen': 19,
'nineteenth': 19,
'twenty': 20,
'twentieth': 20,
'twenty one': 21,
'twenty first': 21,
'twenty two': 22,
'twenty second': 22,
'twenty three': 23,
'twenty third': 23,
'twenty four': 24,
'twenty fourth': 24,
'twenty five': 25,
'twenty fifth': 25,
'twenty six': 26,
'twenty sixth': 26,
'twenty seven': 27,
'twenty seventh': 27,
'twenty eight': 28,
'twenty eighth': 28,
'twenty nine': 29,
'twenty ninth': 29,
'thirty': 30,
'thirtieth': 30,
'thirty one': 31,
'thirty first': 31
}
_dayRegex = re.compile(
r"""(?ix)
((week|day)s?\ from\ )?
(
tomorrow
|tonight
|today
|(next|this)[\ \b](morning|afternoon|evening|Monday|Tuesday|Wednesday|Thursday|Friday|Saturday|Sunday)
|(Monday|Tuesday|Wednesday|Thursday|Friday|Saturday|Sunday)
|(Jan(?:uary)?|Feb(?:ruary)?|Mar(?:ch)?|Apr(?:il)?|May|June?|July?|Aug(?:ust)?|Sept(?:ember)?|Oct(?:ober)?|Nov(?:ember)?|Dec(?:ember)?)\ (\w+)((\s|\-)?\w*)
)
""")
_timeRegex = re.compile(
r"""(?ix)
.*?
(
morning
|afternoon
|evening
|(\d{1,2}\:\d{2})\ ?(am|pm)?
|in\ (.+?)\ (hours|minutes)(\ (?:and\ )?(.+?)\ (hours|minutes))?
)
.*?""")
def _preprocess(self, inp):
return inp.replace('-', ' ').lower()
def extractDays(self, inp):
"""Extracts all day-related information from an input string.
Ignores any information related to the specific time-of-day.
Args:
inp (str): Input string to be parsed.
Returns:
A list of datetime objects containing the extracted date from the
input snippet, or an empty list if none found.
"""
inp = self._preprocess(inp)
def extractDayOfWeek(dayMatch):
if dayMatch.group(5) in self.__daysOfWeek__:
return self.__daysOfWeek__.index(dayMatch.group(5))
elif dayMatch.group(6) in self.__daysOfWeek__:
return self.__daysOfWeek__.index(dayMatch.group(6))
def extractMonth(dayMatch):
if dayMatch.group(7) in self.__months__:
return self.__months__.index(dayMatch.group(7)) + 1
elif dayMatch.group(7) in self.__shortMonths__:
return self.__shortMonths__.index(dayMatch.group(7)) + 1
def extractDay(dayMatch):
combined = dayMatch.group(8) + dayMatch.group(9)
if combined in self.__dateDescriptors__:
return self.__dateDescriptors__[combined]
elif dayMatch.group(8) in self.__dateDescriptors__:
return self.__dateDescriptors__[dayMatch.group(8)]
elif int(dayMatch.group(8)) in self.__dateDescriptors__.values():
return int(dayMatch.group(8))
def extractDaysFrom(dayMatch):
if not dayMatch.group(1):
return 0
def numericalPrefix(dayMatch):
# Grab 'three' of 'three weeks from'
prefix = inp.split(dayMatch.group(1))[0].strip().split(' ')
prefix.reverse()
prefix = list(filter(lambda s: s != 'and', prefix))
# Generate best guess number
service = NumberService()
num = prefix[0]
if service.isValid(num):
for n in prefix[1:]:
inc = n + " " + num
if service.isValid(inc):
num = inc
else:
break
return service.parse(num)
return 1
factor = numericalPrefix(dayMatch)
if dayMatch.group(2) == 'week':
return factor * 7
elif dayMatch.group(2) == 'day':
return factor * 1
def handleMatch(dayMatch):
def safe(exp):
"""For safe evaluation of regex groups"""
try:
return exp()
except:
return False
days_from = safe(lambda: extractDaysFrom(dayMatch))
today = safe(lambda: dayMatch.group(3) in self.__todayMatches__)
tomorrow = safe(lambda: dayMatch.group(3)
in self.__tomorrowMatches__)
next_week = safe(lambda: dayMatch.group(4) == 'next')
day_of_week = safe(lambda: extractDayOfWeek(dayMatch))
month = safe(lambda: extractMonth(dayMatch))
day = safe(lambda: extractDay(dayMatch))
# Convert extracted terms to datetime object
if not dayMatch:
return None
elif today:
d = self.now
elif tomorrow:
d = self.now + datetime.timedelta(days=1)
elif type(day_of_week) == int:
current_day_of_week = self.now.weekday()
num_days_away = (day_of_week - current_day_of_week) % 7
if next_week:
num_days_away += 7
d = self.now + \
datetime.timedelta(days=num_days_away)
elif month and day:
d = datetime.datetime(
self.now.year, month, day,
self.now.hour, self.now.minute)
if days_from:
d += datetime.timedelta(days=days_from)
return d
matches = self._dayRegex.finditer(inp)
return [handleMatch(dayMatch) for dayMatch in matches]
def extractDay(self, inp):
"""Returns the first time-related date found in the input string,
or None if not found."""
day = self.extractDay(inp)
if day:
return day[0]
return None
    def extractTimes(self, inp):
        """Extracts time-related information from an input string.
        Ignores any information related to the specific date, focusing
        on the time-of-day.

        Args:
            inp (str): Input string to be parsed.

        Returns:
            A list of datetime objects containing the extracted times from the
            input snippet, or an empty list if none found. Entries may be
            None for matches that mention a relative offset which cannot
            be parsed as a number.
        """
        def handleMatch(time):
            # Converts one _timeRegex match object to a datetime (or None).
            # Group layout (from _timeRegex): 1 = whole alternative,
            # 2/3 = "HH:MM" and optional am/pm, 4/5 = first relative
            # quantity and unit, 6-8 = optional second quantity and unit.
            relative = False
            if not time:
                return None

            # Default times: 8am, 12pm, 7pm
            elif time.group(1) == 'morning':
                h = 8
                m = 0
            elif time.group(1) == 'afternoon':
                h = 12
                m = 0
            elif time.group(1) == 'evening':
                h = 19
                m = 0
            elif time.group(4) and time.group(5):
                # "in N hours/minutes (and M hours/minutes)" -- relative form
                h, m = 0, 0

                # Extract hours difference
                converter = NumberService()
                try:
                    diff = converter.parse(time.group(4))
                except:
                    # Quantity wasn't a parseable number word; drop the match.
                    return None

                if time.group(5) == 'hours':
                    h += diff
                else:
                    m += diff

                # Extract minutes difference
                if time.group(6):
                    converter = NumberService()
                    try:
                        diff = converter.parse(time.group(7))
                    except:
                        return None

                    if time.group(8) == 'hours':
                        h += diff
                    else:
                        m += diff

                relative = True
            else:
                # Convert from "HH:MM pm" format
                t = time.group(2)
                # %12 normalizes "12:30" to hour 0 before am/pm adjustment
                h, m = int(t.split(':')[0]) % 12, int(t.split(':')[1])

                try:
                    if time.group(3) == 'pm':
                        h += 12
                except IndexError:
                    pass

            if relative:
                # Offset from "now"; carries the current date along.
                return self.now + datetime.timedelta(hours=h, minutes=m)
            else:
                # Absolute time-of-day anchored to today's date.
                return datetime.datetime(
                    self.now.year, self.now.month, self.now.day, h, m
                )

        inp = self._preprocess(inp)
        return [handleMatch(time) for time in self._timeRegex.finditer(inp)]
def extractTime(self, inp):
"""Returns the first time-related date found in the input string,
or None if not found."""
times = self.extractTimes(inp)
if times:
return times[0]
return None
def extractDates(self, inp):
    """Extract semantic date information from an input string.

    In effect, runs both the day and time extractors on the input
    string and merges the results to produce comprehensive datetime
    objects.

    Args:
        inp (str): Input string to be parsed.

    Returns:
        An iterable of datetime objects containing the extracted dates
        from the input snippet, or an empty iterable if none found.
    """
    def merge(pair):
        day, time = pair
        # Combine the calendar portion of `day` with the clock portion
        # of `time`; if only one was found, return it as-is.
        if day and time:
            return datetime.datetime(
                day.year, day.month, day.day, time.hour, time.minute
            )
        return day or time or None
    return map(
        merge,
        zip_longest(self.extractDays(inp), self.extractTimes(inp),
                    fillvalue=None))
def extractDate(self, inp):
    """Returns the first date found in the input string, or None if not
    found."""
    # extractDates may return a lazy map object, so take the first
    # element with an iterator rather than by indexing.
    return next(iter(self.extractDates(inp)), None)
def convertDay(self, day, prefix="", weekday=False):
"""Convert a datetime object representing a day into a human-ready
string that can be read, spoken aloud, etc.
Args:
day (datetime.date): A datetime object to be converted into text.
prefix (str): An optional argument that prefixes the converted
string. For example, if prefix="in", you'd receive "in two
days", rather than "two days", while the method would still
return "tomorrow" (rather than "in tomorrow").
weekday (bool): An optional argument that returns "Monday, Oct. 1"
if True, rather than "Oct. 1".
Returns:
A string representation of the input day, ignoring any time-related
information.
"""
# True when d1 and d2 fall on the same calendar date.
def sameDay(d1, d2):
d = d1.day == d2.day
m = d1.month == d2.month
y = d1.year == d2.year
return d and m and y
tom = self.now + datetime.timedelta(days=1)
if sameDay(day, self.now):
return "today"
elif sameDay(day, tom):
return "tomorrow"
if weekday:
dayString = day.strftime("%A, %B %d")
else:
dayString = day.strftime("%B %d")
# Ex) Remove '0' from 'August 03'
# %d zero-pads the day number, so the tens digit sits at index -2;
# a zero there means a single-digit day whose pad gets stripped.
if not int(dayString[-2]):
dayString = dayString[:-2] + dayString[-1]
# NOTE(review): with the default prefix="" this returns a leading
# space (e.g. " August 3") — confirm whether callers rely on it
# before changing.
return prefix + " " + dayString
def convertTime(self, time):
    """Convert a datetime object representing a time into a human-ready
    string that can be read, spoken aloud, etc.

    Args:
        time (datetime.date): A datetime object to be converted into text.

    Returns:
        A string representation of the input time, ignoring any
        day-related information.
    """
    # Skip reporting minutes entirely for on-the-hour times (':00').
    minute_part = ":%M" if time.minute else ""
    formatted = time.strftime("%I" + minute_part + " %p")
    # %I zero-pads the hour ('07:30'); drop the pad to get '7:30'.
    if formatted.startswith("0"):
        formatted = formatted[1:]
    return formatted
def convertDate(self, date, prefix="", weekday=False):
    """Convert a datetime object into a human-ready string that can be
    read, spoken aloud, etc. In effect, runs both convertDay and
    convertTime on the input, merging the results.

    Args:
        date (datetime.date): A datetime object to be converted into text.
        prefix (str): Optional text prepended to the day portion (see
            convertDay for details).
        weekday (bool): If True, include the weekday name in the day
            portion (see convertDay).

    Returns:
        A string representation of the input day and time.
    """
    day_part = self.convertDay(date, prefix=prefix, weekday=weekday)
    time_part = self.convertTime(date)
    return "{0} at {1}".format(day_part, time_part)
|
crm416/semantic | semantic/dates.py | DateService.extractDays | python | def extractDays(self, inp):
inp = self._preprocess(inp)
def extractDayOfWeek(dayMatch):
if dayMatch.group(5) in self.__daysOfWeek__:
return self.__daysOfWeek__.index(dayMatch.group(5))
elif dayMatch.group(6) in self.__daysOfWeek__:
return self.__daysOfWeek__.index(dayMatch.group(6))
def extractMonth(dayMatch):
if dayMatch.group(7) in self.__months__:
return self.__months__.index(dayMatch.group(7)) + 1
elif dayMatch.group(7) in self.__shortMonths__:
return self.__shortMonths__.index(dayMatch.group(7)) + 1
def extractDay(dayMatch):
combined = dayMatch.group(8) + dayMatch.group(9)
if combined in self.__dateDescriptors__:
return self.__dateDescriptors__[combined]
elif dayMatch.group(8) in self.__dateDescriptors__:
return self.__dateDescriptors__[dayMatch.group(8)]
elif int(dayMatch.group(8)) in self.__dateDescriptors__.values():
return int(dayMatch.group(8))
def extractDaysFrom(dayMatch):
if not dayMatch.group(1):
return 0
def numericalPrefix(dayMatch):
# Grab 'three' of 'three weeks from'
prefix = inp.split(dayMatch.group(1))[0].strip().split(' ')
prefix.reverse()
prefix = list(filter(lambda s: s != 'and', prefix))
# Generate best guess number
service = NumberService()
num = prefix[0]
if service.isValid(num):
for n in prefix[1:]:
inc = n + " " + num
if service.isValid(inc):
num = inc
else:
break
return service.parse(num)
return 1
factor = numericalPrefix(dayMatch)
if dayMatch.group(2) == 'week':
return factor * 7
elif dayMatch.group(2) == 'day':
return factor * 1
def handleMatch(dayMatch):
def safe(exp):
"""For safe evaluation of regex groups"""
try:
return exp()
except:
return False
days_from = safe(lambda: extractDaysFrom(dayMatch))
today = safe(lambda: dayMatch.group(3) in self.__todayMatches__)
tomorrow = safe(lambda: dayMatch.group(3)
in self.__tomorrowMatches__)
next_week = safe(lambda: dayMatch.group(4) == 'next')
day_of_week = safe(lambda: extractDayOfWeek(dayMatch))
month = safe(lambda: extractMonth(dayMatch))
day = safe(lambda: extractDay(dayMatch))
# Convert extracted terms to datetime object
if not dayMatch:
return None
elif today:
d = self.now
elif tomorrow:
d = self.now + datetime.timedelta(days=1)
elif type(day_of_week) == int:
current_day_of_week = self.now.weekday()
num_days_away = (day_of_week - current_day_of_week) % 7
if next_week:
num_days_away += 7
d = self.now + \
datetime.timedelta(days=num_days_away)
elif month and day:
d = datetime.datetime(
self.now.year, month, day,
self.now.hour, self.now.minute)
if days_from:
d += datetime.timedelta(days=days_from)
return d
matches = self._dayRegex.finditer(inp)
return [handleMatch(dayMatch) for dayMatch in matches] | Extracts all day-related information from an input string.
Ignores any information related to the specific time-of-day.
Args:
inp (str): Input string to be parsed.
Returns:
A list of datetime objects containing the extracted date from the
input snippet, or an empty list if none found. | train | https://github.com/crm416/semantic/blob/46deb8fefb3ea58aad2fedc8d0d62f3ee254b8fe/semantic/dates.py#L145-L253 | [
"def _preprocess(self, inp):\n return inp.replace('-', ' ').lower()\n"
] | class DateService(object):
"""Initialize a DateService for extracting dates from text.
Args:
tz: An optional Pytz timezone. All datetime objects returned will
be relative to the supplied timezone, or timezone-less if none
is supplied.
now: The time to which all returned datetime objects should be
relative. For example, if the text is "In 5 hours", the
datetime returned will be now + datetime.timedelta(hours=5).
Uses datetime.datetime.now() if none is supplied.
Returns:
A DateService which uses tz and now for all of its computations.
"""
def __init__(self, tz=None, now=None):
self.tz = tz
if now:
self.now = now
else:
self.now = datetime.datetime.now(tz=self.tz)
__months__ = ['january', 'february', 'march', 'april', 'may', 'june',
'july', 'august', 'september', 'october', 'november',
'december']
__shortMonths__ = ['jan', 'feb', 'mar', 'apr', 'may',
'jun', 'jul', 'aug', 'sept', 'oct', 'nov', 'dec']
__daysOfWeek__ = ['monday', 'tuesday', 'wednesday',
'thursday', 'friday', 'saturday', 'sunday']
__relativeDates__ = ['tomorrow', 'tonight', 'next']
__todayMatches__ = ['tonight', 'today', 'this morning',
'this evening', 'this afternoon']
__tomorrowMatches__ = ['tomorrow', 'next morning',
'next evening', 'next afternoon']
__dateDescriptors__ = {
'one': 1,
'first': 1,
'two': 2,
'second': 2,
'three': 3,
'third': 3,
'four': 4,
'fourth': 4,
'five': 5,
'fifth': 5,
'six': 6,
'sixth': 6,
'seven': 7,
'seventh': 7,
'eight': 8,
'eighth': 8,
'nine': 9,
'ninth': 9,
'ten': 10,
'tenth': 10,
'eleven': 11,
'eleventh': 11,
'twelve': 12,
'twelth': 12,
'thirteen': 13,
'thirteenth': 13,
'fourteen': 14,
'fourteenth': 14,
'fifteen': 15,
'fifteenth': 15,
'sixteen': 16,
'sixteenth': 16,
'seventeen': 17,
'seventeenth': 17,
'eighteen': 18,
'eighteenth': 18,
'nineteen': 19,
'nineteenth': 19,
'twenty': 20,
'twentieth': 20,
'twenty one': 21,
'twenty first': 21,
'twenty two': 22,
'twenty second': 22,
'twenty three': 23,
'twenty third': 23,
'twenty four': 24,
'twenty fourth': 24,
'twenty five': 25,
'twenty fifth': 25,
'twenty six': 26,
'twenty sixth': 26,
'twenty seven': 27,
'twenty seventh': 27,
'twenty eight': 28,
'twenty eighth': 28,
'twenty nine': 29,
'twenty ninth': 29,
'thirty': 30,
'thirtieth': 30,
'thirty one': 31,
'thirty first': 31
}
_dayRegex = re.compile(
r"""(?ix)
((week|day)s?\ from\ )?
(
tomorrow
|tonight
|today
|(next|this)[\ \b](morning|afternoon|evening|Monday|Tuesday|Wednesday|Thursday|Friday|Saturday|Sunday)
|(Monday|Tuesday|Wednesday|Thursday|Friday|Saturday|Sunday)
|(Jan(?:uary)?|Feb(?:ruary)?|Mar(?:ch)?|Apr(?:il)?|May|June?|July?|Aug(?:ust)?|Sept(?:ember)?|Oct(?:ober)?|Nov(?:ember)?|Dec(?:ember)?)\ (\w+)((\s|\-)?\w*)
)
""")
_timeRegex = re.compile(
r"""(?ix)
.*?
(
morning
|afternoon
|evening
|(\d{1,2}\:\d{2})\ ?(am|pm)?
|in\ (.+?)\ (hours|minutes)(\ (?:and\ )?(.+?)\ (hours|minutes))?
)
.*?""")
def _preprocess(self, inp):
return inp.replace('-', ' ').lower()
def extractDay(self, inp):
    """Returns the first day-related date found in the input string,
    or None if not found.

    Args:
        inp (str): Input string to be parsed.

    Returns:
        A datetime object for the first extracted day, or None if the
        input contains no day-related information.
    """
    # Bug fix: the original body called self.extractDay (this method
    # itself) instead of self.extractDays, recursing forever.
    days = self.extractDays(inp)
    if days:
        return days[0]
    return None
def extractTimes(self, inp):
"""Extracts time-related information from an input string.
Ignores any information related to the specific date, focusing
on the time-of-day.
Args:
inp (str): Input string to be parsed.
Returns:
A list of datetime objects containing the extracted times from the
input snippet, or an empty list if none found.
"""
def handleMatch(time):
relative = False
if not time:
return None
# Default times: 8am, 12pm, 7pm
elif time.group(1) == 'morning':
h = 8
m = 0
elif time.group(1) == 'afternoon':
h = 12
m = 0
elif time.group(1) == 'evening':
h = 19
m = 0
elif time.group(4) and time.group(5):
h, m = 0, 0
# Extract hours difference
converter = NumberService()
try:
diff = converter.parse(time.group(4))
except:
return None
if time.group(5) == 'hours':
h += diff
else:
m += diff
# Extract minutes difference
if time.group(6):
converter = NumberService()
try:
diff = converter.parse(time.group(7))
except:
return None
if time.group(8) == 'hours':
h += diff
else:
m += diff
relative = True
else:
# Convert from "HH:MM pm" format
t = time.group(2)
h, m = int(t.split(':')[0]) % 12, int(t.split(':')[1])
try:
if time.group(3) == 'pm':
h += 12
except IndexError:
pass
if relative:
return self.now + datetime.timedelta(hours=h, minutes=m)
else:
return datetime.datetime(
self.now.year, self.now.month, self.now.day, h, m
)
inp = self._preprocess(inp)
return [handleMatch(time) for time in self._timeRegex.finditer(inp)]
def extractTime(self, inp):
"""Returns the first time-related date found in the input string,
or None if not found."""
times = self.extractTimes(inp)
if times:
return times[0]
return None
def extractDates(self, inp):
"""Extract semantic date information from an input string.
In effect, runs both parseDay and parseTime on the input
string and merges the results to produce a comprehensive
datetime object.
Args:
inp (str): Input string to be parsed.
Returns:
A list of datetime objects containing the extracted dates from the
input snippet, or an empty list if not found.
"""
def merge(param):
day, time = param
if not (day or time):
return None
if not day:
return time
if not time:
return day
return datetime.datetime(
day.year, day.month, day.day, time.hour, time.minute
)
days = self.extractDays(inp)
times = self.extractTimes(inp)
return map(merge, zip_longest(days, times, fillvalue=None))
def extractDate(self, inp):
"""Returns the first date found in the input string, or None if not
found."""
dates = self.extractDates(inp)
for date in dates:
return date
return None
def convertDay(self, day, prefix="", weekday=False):
"""Convert a datetime object representing a day into a human-ready
string that can be read, spoken aloud, etc.
Args:
day (datetime.date): A datetime object to be converted into text.
prefix (str): An optional argument that prefixes the converted
string. For example, if prefix="in", you'd receive "in two
days", rather than "two days", while the method would still
return "tomorrow" (rather than "in tomorrow").
weekday (bool): An optional argument that returns "Monday, Oct. 1"
if True, rather than "Oct. 1".
Returns:
A string representation of the input day, ignoring any time-related
information.
"""
def sameDay(d1, d2):
d = d1.day == d2.day
m = d1.month == d2.month
y = d1.year == d2.year
return d and m and y
tom = self.now + datetime.timedelta(days=1)
if sameDay(day, self.now):
return "today"
elif sameDay(day, tom):
return "tomorrow"
if weekday:
dayString = day.strftime("%A, %B %d")
else:
dayString = day.strftime("%B %d")
# Ex) Remove '0' from 'August 03'
if not int(dayString[-2]):
dayString = dayString[:-2] + dayString[-1]
return prefix + " " + dayString
def convertTime(self, time):
"""Convert a datetime object representing a time into a human-ready
string that can be read, spoken aloud, etc.
Args:
time (datetime.date): A datetime object to be converted into text.
Returns:
A string representation of the input time, ignoring any day-related
information.
"""
# if ':00', ignore reporting minutes
m_format = ""
if time.minute:
m_format = ":%M"
timeString = time.strftime("%I" + m_format + " %p")
# if '07:30', cast to '7:30'
if not int(timeString[0]):
timeString = timeString[1:]
return timeString
def convertDate(self, date, prefix="", weekday=False):
"""Convert a datetime object representing into a human-ready
string that can be read, spoken aloud, etc. In effect, runs
both convertDay and convertTime on the input, merging the results.
Args:
date (datetime.date): A datetime object to be converted into text.
prefix (str): An optional argument that prefixes the converted
string. For example, if prefix="in", you'd receive "in two
days", rather than "two days", while the method would still
return "tomorrow" (rather than "in tomorrow").
weekday (bool): An optional argument that returns "Monday, Oct. 1"
if True, rather than "Oct. 1".
Returns:
A string representation of the input day and time.
"""
dayString = self.convertDay(
date, prefix=prefix, weekday=weekday)
timeString = self.convertTime(date)
return dayString + " at " + timeString
|
crm416/semantic | semantic/dates.py | DateService.extractTimes | python | def extractTimes(self, inp):
def handleMatch(time):
relative = False
if not time:
return None
# Default times: 8am, 12pm, 7pm
elif time.group(1) == 'morning':
h = 8
m = 0
elif time.group(1) == 'afternoon':
h = 12
m = 0
elif time.group(1) == 'evening':
h = 19
m = 0
elif time.group(4) and time.group(5):
h, m = 0, 0
# Extract hours difference
converter = NumberService()
try:
diff = converter.parse(time.group(4))
except:
return None
if time.group(5) == 'hours':
h += diff
else:
m += diff
# Extract minutes difference
if time.group(6):
converter = NumberService()
try:
diff = converter.parse(time.group(7))
except:
return None
if time.group(8) == 'hours':
h += diff
else:
m += diff
relative = True
else:
# Convert from "HH:MM pm" format
t = time.group(2)
h, m = int(t.split(':')[0]) % 12, int(t.split(':')[1])
try:
if time.group(3) == 'pm':
h += 12
except IndexError:
pass
if relative:
return self.now + datetime.timedelta(hours=h, minutes=m)
else:
return datetime.datetime(
self.now.year, self.now.month, self.now.day, h, m
)
inp = self._preprocess(inp)
return [handleMatch(time) for time in self._timeRegex.finditer(inp)] | Extracts time-related information from an input string.
Ignores any information related to the specific date, focusing
on the time-of-day.
Args:
inp (str): Input string to be parsed.
Returns:
A list of datetime objects containing the extracted times from the
input snippet, or an empty list if none found. | train | https://github.com/crm416/semantic/blob/46deb8fefb3ea58aad2fedc8d0d62f3ee254b8fe/semantic/dates.py#L263-L339 | [
"def _preprocess(self, inp):\n return inp.replace('-', ' ').lower()\n"
] | class DateService(object):
"""Initialize a DateService for extracting dates from text.
Args:
tz: An optional Pytz timezone. All datetime objects returned will
be relative to the supplied timezone, or timezone-less if none
is supplied.
now: The time to which all returned datetime objects should be
relative. For example, if the text is "In 5 hours", the
datetime returned will be now + datetime.timedelta(hours=5).
Uses datetime.datetime.now() if none is supplied.
Returns:
A DateService which uses tz and now for all of its computations.
"""
def __init__(self, tz=None, now=None):
self.tz = tz
if now:
self.now = now
else:
self.now = datetime.datetime.now(tz=self.tz)
__months__ = ['january', 'february', 'march', 'april', 'may', 'june',
'july', 'august', 'september', 'october', 'november',
'december']
__shortMonths__ = ['jan', 'feb', 'mar', 'apr', 'may',
'jun', 'jul', 'aug', 'sept', 'oct', 'nov', 'dec']
__daysOfWeek__ = ['monday', 'tuesday', 'wednesday',
'thursday', 'friday', 'saturday', 'sunday']
__relativeDates__ = ['tomorrow', 'tonight', 'next']
__todayMatches__ = ['tonight', 'today', 'this morning',
'this evening', 'this afternoon']
__tomorrowMatches__ = ['tomorrow', 'next morning',
'next evening', 'next afternoon']
__dateDescriptors__ = {
'one': 1,
'first': 1,
'two': 2,
'second': 2,
'three': 3,
'third': 3,
'four': 4,
'fourth': 4,
'five': 5,
'fifth': 5,
'six': 6,
'sixth': 6,
'seven': 7,
'seventh': 7,
'eight': 8,
'eighth': 8,
'nine': 9,
'ninth': 9,
'ten': 10,
'tenth': 10,
'eleven': 11,
'eleventh': 11,
'twelve': 12,
'twelth': 12,
'thirteen': 13,
'thirteenth': 13,
'fourteen': 14,
'fourteenth': 14,
'fifteen': 15,
'fifteenth': 15,
'sixteen': 16,
'sixteenth': 16,
'seventeen': 17,
'seventeenth': 17,
'eighteen': 18,
'eighteenth': 18,
'nineteen': 19,
'nineteenth': 19,
'twenty': 20,
'twentieth': 20,
'twenty one': 21,
'twenty first': 21,
'twenty two': 22,
'twenty second': 22,
'twenty three': 23,
'twenty third': 23,
'twenty four': 24,
'twenty fourth': 24,
'twenty five': 25,
'twenty fifth': 25,
'twenty six': 26,
'twenty sixth': 26,
'twenty seven': 27,
'twenty seventh': 27,
'twenty eight': 28,
'twenty eighth': 28,
'twenty nine': 29,
'twenty ninth': 29,
'thirty': 30,
'thirtieth': 30,
'thirty one': 31,
'thirty first': 31
}
_dayRegex = re.compile(
r"""(?ix)
((week|day)s?\ from\ )?
(
tomorrow
|tonight
|today
|(next|this)[\ \b](morning|afternoon|evening|Monday|Tuesday|Wednesday|Thursday|Friday|Saturday|Sunday)
|(Monday|Tuesday|Wednesday|Thursday|Friday|Saturday|Sunday)
|(Jan(?:uary)?|Feb(?:ruary)?|Mar(?:ch)?|Apr(?:il)?|May|June?|July?|Aug(?:ust)?|Sept(?:ember)?|Oct(?:ober)?|Nov(?:ember)?|Dec(?:ember)?)\ (\w+)((\s|\-)?\w*)
)
""")
_timeRegex = re.compile(
r"""(?ix)
.*?
(
morning
|afternoon
|evening
|(\d{1,2}\:\d{2})\ ?(am|pm)?
|in\ (.+?)\ (hours|minutes)(\ (?:and\ )?(.+?)\ (hours|minutes))?
)
.*?""")
def _preprocess(self, inp):
return inp.replace('-', ' ').lower()
def extractDays(self, inp):
"""Extracts all day-related information from an input string.
Ignores any information related to the specific time-of-day.
Args:
inp (str): Input string to be parsed.
Returns:
A list of datetime objects containing the extracted date from the
input snippet, or an empty list if none found.
"""
inp = self._preprocess(inp)
def extractDayOfWeek(dayMatch):
if dayMatch.group(5) in self.__daysOfWeek__:
return self.__daysOfWeek__.index(dayMatch.group(5))
elif dayMatch.group(6) in self.__daysOfWeek__:
return self.__daysOfWeek__.index(dayMatch.group(6))
def extractMonth(dayMatch):
if dayMatch.group(7) in self.__months__:
return self.__months__.index(dayMatch.group(7)) + 1
elif dayMatch.group(7) in self.__shortMonths__:
return self.__shortMonths__.index(dayMatch.group(7)) + 1
def extractDay(dayMatch):
combined = dayMatch.group(8) + dayMatch.group(9)
if combined in self.__dateDescriptors__:
return self.__dateDescriptors__[combined]
elif dayMatch.group(8) in self.__dateDescriptors__:
return self.__dateDescriptors__[dayMatch.group(8)]
elif int(dayMatch.group(8)) in self.__dateDescriptors__.values():
return int(dayMatch.group(8))
def extractDaysFrom(dayMatch):
if not dayMatch.group(1):
return 0
def numericalPrefix(dayMatch):
# Grab 'three' of 'three weeks from'
prefix = inp.split(dayMatch.group(1))[0].strip().split(' ')
prefix.reverse()
prefix = list(filter(lambda s: s != 'and', prefix))
# Generate best guess number
service = NumberService()
num = prefix[0]
if service.isValid(num):
for n in prefix[1:]:
inc = n + " " + num
if service.isValid(inc):
num = inc
else:
break
return service.parse(num)
return 1
factor = numericalPrefix(dayMatch)
if dayMatch.group(2) == 'week':
return factor * 7
elif dayMatch.group(2) == 'day':
return factor * 1
def handleMatch(dayMatch):
def safe(exp):
"""For safe evaluation of regex groups"""
try:
return exp()
except:
return False
days_from = safe(lambda: extractDaysFrom(dayMatch))
today = safe(lambda: dayMatch.group(3) in self.__todayMatches__)
tomorrow = safe(lambda: dayMatch.group(3)
in self.__tomorrowMatches__)
next_week = safe(lambda: dayMatch.group(4) == 'next')
day_of_week = safe(lambda: extractDayOfWeek(dayMatch))
month = safe(lambda: extractMonth(dayMatch))
day = safe(lambda: extractDay(dayMatch))
# Convert extracted terms to datetime object
if not dayMatch:
return None
elif today:
d = self.now
elif tomorrow:
d = self.now + datetime.timedelta(days=1)
elif type(day_of_week) == int:
current_day_of_week = self.now.weekday()
num_days_away = (day_of_week - current_day_of_week) % 7
if next_week:
num_days_away += 7
d = self.now + \
datetime.timedelta(days=num_days_away)
elif month and day:
d = datetime.datetime(
self.now.year, month, day,
self.now.hour, self.now.minute)
if days_from:
d += datetime.timedelta(days=days_from)
return d
matches = self._dayRegex.finditer(inp)
return [handleMatch(dayMatch) for dayMatch in matches]
def extractDay(self, inp):
    """Returns the first day-related date found in the input string,
    or None if not found.

    Args:
        inp (str): Input string to be parsed.

    Returns:
        A datetime object for the first extracted day, or None if the
        input contains no day-related information.
    """
    # Bug fix: the original body called self.extractDay (this method
    # itself) instead of self.extractDays, recursing forever.
    days = self.extractDays(inp)
    if days:
        return days[0]
    return None
def extractTime(self, inp):
"""Returns the first time-related date found in the input string,
or None if not found."""
times = self.extractTimes(inp)
if times:
return times[0]
return None
def extractDates(self, inp):
"""Extract semantic date information from an input string.
In effect, runs both parseDay and parseTime on the input
string and merges the results to produce a comprehensive
datetime object.
Args:
inp (str): Input string to be parsed.
Returns:
A list of datetime objects containing the extracted dates from the
input snippet, or an empty list if not found.
"""
def merge(param):
day, time = param
if not (day or time):
return None
if not day:
return time
if not time:
return day
return datetime.datetime(
day.year, day.month, day.day, time.hour, time.minute
)
days = self.extractDays(inp)
times = self.extractTimes(inp)
return map(merge, zip_longest(days, times, fillvalue=None))
def extractDate(self, inp):
"""Returns the first date found in the input string, or None if not
found."""
dates = self.extractDates(inp)
for date in dates:
return date
return None
def convertDay(self, day, prefix="", weekday=False):
"""Convert a datetime object representing a day into a human-ready
string that can be read, spoken aloud, etc.
Args:
day (datetime.date): A datetime object to be converted into text.
prefix (str): An optional argument that prefixes the converted
string. For example, if prefix="in", you'd receive "in two
days", rather than "two days", while the method would still
return "tomorrow" (rather than "in tomorrow").
weekday (bool): An optional argument that returns "Monday, Oct. 1"
if True, rather than "Oct. 1".
Returns:
A string representation of the input day, ignoring any time-related
information.
"""
def sameDay(d1, d2):
d = d1.day == d2.day
m = d1.month == d2.month
y = d1.year == d2.year
return d and m and y
tom = self.now + datetime.timedelta(days=1)
if sameDay(day, self.now):
return "today"
elif sameDay(day, tom):
return "tomorrow"
if weekday:
dayString = day.strftime("%A, %B %d")
else:
dayString = day.strftime("%B %d")
# Ex) Remove '0' from 'August 03'
if not int(dayString[-2]):
dayString = dayString[:-2] + dayString[-1]
return prefix + " " + dayString
def convertTime(self, time):
"""Convert a datetime object representing a time into a human-ready
string that can be read, spoken aloud, etc.
Args:
time (datetime.date): A datetime object to be converted into text.
Returns:
A string representation of the input time, ignoring any day-related
information.
"""
# if ':00', ignore reporting minutes
m_format = ""
if time.minute:
m_format = ":%M"
timeString = time.strftime("%I" + m_format + " %p")
# if '07:30', cast to '7:30'
if not int(timeString[0]):
timeString = timeString[1:]
return timeString
def convertDate(self, date, prefix="", weekday=False):
"""Convert a datetime object representing into a human-ready
string that can be read, spoken aloud, etc. In effect, runs
both convertDay and convertTime on the input, merging the results.
Args:
date (datetime.date): A datetime object to be converted into text.
prefix (str): An optional argument that prefixes the converted
string. For example, if prefix="in", you'd receive "in two
days", rather than "two days", while the method would still
return "tomorrow" (rather than "in tomorrow").
weekday (bool): An optional argument that returns "Monday, Oct. 1"
if True, rather than "Oct. 1".
Returns:
A string representation of the input day and time.
"""
dayString = self.convertDay(
date, prefix=prefix, weekday=weekday)
timeString = self.convertTime(date)
return dayString + " at " + timeString
|
crm416/semantic | semantic/dates.py | DateService.extractDates | python | def extractDates(self, inp):
def merge(param):
day, time = param
if not (day or time):
return None
if not day:
return time
if not time:
return day
return datetime.datetime(
day.year, day.month, day.day, time.hour, time.minute
)
days = self.extractDays(inp)
times = self.extractTimes(inp)
return map(merge, zip_longest(days, times, fillvalue=None)) | Extract semantic date information from an input string.
In effect, runs both parseDay and parseTime on the input
string and merges the results to produce a comprehensive
datetime object.
Args:
inp (str): Input string to be parsed.
Returns:
A list of datetime objects containing the extracted dates from the
input snippet, or an empty list if not found. | train | https://github.com/crm416/semantic/blob/46deb8fefb3ea58aad2fedc8d0d62f3ee254b8fe/semantic/dates.py#L349-L378 | [
"def extractDays(self, inp):\n \"\"\"Extracts all day-related information from an input string.\n Ignores any information related to the specific time-of-day.\n\n Args:\n inp (str): Input string to be parsed.\n\n Returns:\n A list of datetime objects containing the extracted date from the\n input snippet, or an empty list if none found.\n \"\"\"\n inp = self._preprocess(inp)\n\n def extractDayOfWeek(dayMatch):\n if dayMatch.group(5) in self.__daysOfWeek__:\n return self.__daysOfWeek__.index(dayMatch.group(5))\n elif dayMatch.group(6) in self.__daysOfWeek__:\n return self.__daysOfWeek__.index(dayMatch.group(6))\n\n def extractMonth(dayMatch):\n if dayMatch.group(7) in self.__months__:\n return self.__months__.index(dayMatch.group(7)) + 1\n elif dayMatch.group(7) in self.__shortMonths__:\n return self.__shortMonths__.index(dayMatch.group(7)) + 1\n\n def extractDay(dayMatch):\n combined = dayMatch.group(8) + dayMatch.group(9)\n if combined in self.__dateDescriptors__:\n return self.__dateDescriptors__[combined]\n elif dayMatch.group(8) in self.__dateDescriptors__:\n return self.__dateDescriptors__[dayMatch.group(8)]\n elif int(dayMatch.group(8)) in self.__dateDescriptors__.values():\n return int(dayMatch.group(8))\n\n def extractDaysFrom(dayMatch):\n if not dayMatch.group(1):\n return 0\n\n def numericalPrefix(dayMatch):\n # Grab 'three' of 'three weeks from'\n prefix = inp.split(dayMatch.group(1))[0].strip().split(' ')\n prefix.reverse()\n prefix = list(filter(lambda s: s != 'and', prefix))\n\n # Generate best guess number\n service = NumberService()\n num = prefix[0]\n if service.isValid(num):\n for n in prefix[1:]:\n inc = n + \" \" + num\n if service.isValid(inc):\n num = inc\n else:\n break\n return service.parse(num)\n return 1\n\n factor = numericalPrefix(dayMatch)\n\n if dayMatch.group(2) == 'week':\n return factor * 7\n elif dayMatch.group(2) == 'day':\n return factor * 1\n\n def handleMatch(dayMatch):\n def safe(exp):\n \"\"\"For safe evaluation of regex 
groups\"\"\"\n try:\n return exp()\n except:\n return False\n\n days_from = safe(lambda: extractDaysFrom(dayMatch))\n today = safe(lambda: dayMatch.group(3) in self.__todayMatches__)\n tomorrow = safe(lambda: dayMatch.group(3)\n in self.__tomorrowMatches__)\n next_week = safe(lambda: dayMatch.group(4) == 'next')\n day_of_week = safe(lambda: extractDayOfWeek(dayMatch))\n month = safe(lambda: extractMonth(dayMatch))\n day = safe(lambda: extractDay(dayMatch))\n\n # Convert extracted terms to datetime object\n if not dayMatch:\n return None\n elif today:\n d = self.now\n elif tomorrow:\n d = self.now + datetime.timedelta(days=1)\n elif type(day_of_week) == int:\n current_day_of_week = self.now.weekday()\n num_days_away = (day_of_week - current_day_of_week) % 7\n\n if next_week:\n num_days_away += 7\n\n d = self.now + \\\n datetime.timedelta(days=num_days_away)\n elif month and day:\n d = datetime.datetime(\n self.now.year, month, day,\n self.now.hour, self.now.minute)\n\n if days_from:\n d += datetime.timedelta(days=days_from)\n\n return d\n\n matches = self._dayRegex.finditer(inp)\n return [handleMatch(dayMatch) for dayMatch in matches]\n",
"def extractTimes(self, inp):\n \"\"\"Extracts time-related information from an input string.\n Ignores any information related to the specific date, focusing\n on the time-of-day.\n\n Args:\n inp (str): Input string to be parsed.\n\n Returns:\n A list of datetime objects containing the extracted times from the\n input snippet, or an empty list if none found.\n \"\"\"\n def handleMatch(time):\n relative = False\n\n if not time:\n return None\n\n # Default times: 8am, 12pm, 7pm\n elif time.group(1) == 'morning':\n h = 8\n m = 0\n elif time.group(1) == 'afternoon':\n h = 12\n m = 0\n elif time.group(1) == 'evening':\n h = 19\n m = 0\n elif time.group(4) and time.group(5):\n h, m = 0, 0\n\n # Extract hours difference\n converter = NumberService()\n try:\n diff = converter.parse(time.group(4))\n except:\n return None\n\n if time.group(5) == 'hours':\n h += diff\n else:\n m += diff\n\n # Extract minutes difference\n if time.group(6):\n converter = NumberService()\n try:\n diff = converter.parse(time.group(7))\n except:\n return None\n\n if time.group(8) == 'hours':\n h += diff\n else:\n m += diff\n\n relative = True\n else:\n # Convert from \"HH:MM pm\" format\n t = time.group(2)\n h, m = int(t.split(':')[0]) % 12, int(t.split(':')[1])\n\n try:\n if time.group(3) == 'pm':\n h += 12\n except IndexError:\n pass\n\n if relative:\n return self.now + datetime.timedelta(hours=h, minutes=m)\n else:\n return datetime.datetime(\n self.now.year, self.now.month, self.now.day, h, m\n )\n\n inp = self._preprocess(inp)\n return [handleMatch(time) for time in self._timeRegex.finditer(inp)]\n"
] | class DateService(object):
"""Initialize a DateService for extracting dates from text.
Args:
tz: An optional Pytz timezone. All datetime objects returned will
be relative to the supplied timezone, or timezone-less if none
is supplied.
now: The time to which all returned datetime objects should be
relative. For example, if the text is "In 5 hours", the
datetime returned will be now + datetime.timedelta(hours=5).
Uses datetime.datetime.now() if none is supplied.
Returns:
A DateService which uses tz and now for all of its computations.
"""
def __init__(self, tz=None, now=None):
self.tz = tz
if now:
self.now = now
else:
self.now = datetime.datetime.now(tz=self.tz)
__months__ = ['january', 'february', 'march', 'april', 'may', 'june',
'july', 'august', 'september', 'october', 'november',
'december']
__shortMonths__ = ['jan', 'feb', 'mar', 'apr', 'may',
'jun', 'jul', 'aug', 'sept', 'oct', 'nov', 'dec']
__daysOfWeek__ = ['monday', 'tuesday', 'wednesday',
'thursday', 'friday', 'saturday', 'sunday']
__relativeDates__ = ['tomorrow', 'tonight', 'next']
__todayMatches__ = ['tonight', 'today', 'this morning',
'this evening', 'this afternoon']
__tomorrowMatches__ = ['tomorrow', 'next morning',
'next evening', 'next afternoon']
__dateDescriptors__ = {
'one': 1,
'first': 1,
'two': 2,
'second': 2,
'three': 3,
'third': 3,
'four': 4,
'fourth': 4,
'five': 5,
'fifth': 5,
'six': 6,
'sixth': 6,
'seven': 7,
'seventh': 7,
'eight': 8,
'eighth': 8,
'nine': 9,
'ninth': 9,
'ten': 10,
'tenth': 10,
'eleven': 11,
'eleventh': 11,
'twelve': 12,
'twelth': 12,
'thirteen': 13,
'thirteenth': 13,
'fourteen': 14,
'fourteenth': 14,
'fifteen': 15,
'fifteenth': 15,
'sixteen': 16,
'sixteenth': 16,
'seventeen': 17,
'seventeenth': 17,
'eighteen': 18,
'eighteenth': 18,
'nineteen': 19,
'nineteenth': 19,
'twenty': 20,
'twentieth': 20,
'twenty one': 21,
'twenty first': 21,
'twenty two': 22,
'twenty second': 22,
'twenty three': 23,
'twenty third': 23,
'twenty four': 24,
'twenty fourth': 24,
'twenty five': 25,
'twenty fifth': 25,
'twenty six': 26,
'twenty sixth': 26,
'twenty seven': 27,
'twenty seventh': 27,
'twenty eight': 28,
'twenty eighth': 28,
'twenty nine': 29,
'twenty ninth': 29,
'thirty': 30,
'thirtieth': 30,
'thirty one': 31,
'thirty first': 31
}
_dayRegex = re.compile(
r"""(?ix)
((week|day)s?\ from\ )?
(
tomorrow
|tonight
|today
|(next|this)[\ \b](morning|afternoon|evening|Monday|Tuesday|Wednesday|Thursday|Friday|Saturday|Sunday)
|(Monday|Tuesday|Wednesday|Thursday|Friday|Saturday|Sunday)
|(Jan(?:uary)?|Feb(?:ruary)?|Mar(?:ch)?|Apr(?:il)?|May|June?|July?|Aug(?:ust)?|Sept(?:ember)?|Oct(?:ober)?|Nov(?:ember)?|Dec(?:ember)?)\ (\w+)((\s|\-)?\w*)
)
""")
_timeRegex = re.compile(
r"""(?ix)
.*?
(
morning
|afternoon
|evening
|(\d{1,2}\:\d{2})\ ?(am|pm)?
|in\ (.+?)\ (hours|minutes)(\ (?:and\ )?(.+?)\ (hours|minutes))?
)
.*?""")
def _preprocess(self, inp):
return inp.replace('-', ' ').lower()
def extractDays(self, inp):
"""Extracts all day-related information from an input string.
Ignores any information related to the specific time-of-day.
Args:
inp (str): Input string to be parsed.
Returns:
A list of datetime objects containing the extracted date from the
input snippet, or an empty list if none found.
"""
inp = self._preprocess(inp)
def extractDayOfWeek(dayMatch):
if dayMatch.group(5) in self.__daysOfWeek__:
return self.__daysOfWeek__.index(dayMatch.group(5))
elif dayMatch.group(6) in self.__daysOfWeek__:
return self.__daysOfWeek__.index(dayMatch.group(6))
def extractMonth(dayMatch):
if dayMatch.group(7) in self.__months__:
return self.__months__.index(dayMatch.group(7)) + 1
elif dayMatch.group(7) in self.__shortMonths__:
return self.__shortMonths__.index(dayMatch.group(7)) + 1
def extractDay(dayMatch):
combined = dayMatch.group(8) + dayMatch.group(9)
if combined in self.__dateDescriptors__:
return self.__dateDescriptors__[combined]
elif dayMatch.group(8) in self.__dateDescriptors__:
return self.__dateDescriptors__[dayMatch.group(8)]
elif int(dayMatch.group(8)) in self.__dateDescriptors__.values():
return int(dayMatch.group(8))
def extractDaysFrom(dayMatch):
if not dayMatch.group(1):
return 0
def numericalPrefix(dayMatch):
# Grab 'three' of 'three weeks from'
prefix = inp.split(dayMatch.group(1))[0].strip().split(' ')
prefix.reverse()
prefix = list(filter(lambda s: s != 'and', prefix))
# Generate best guess number
service = NumberService()
num = prefix[0]
if service.isValid(num):
for n in prefix[1:]:
inc = n + " " + num
if service.isValid(inc):
num = inc
else:
break
return service.parse(num)
return 1
factor = numericalPrefix(dayMatch)
if dayMatch.group(2) == 'week':
return factor * 7
elif dayMatch.group(2) == 'day':
return factor * 1
def handleMatch(dayMatch):
def safe(exp):
"""For safe evaluation of regex groups"""
try:
return exp()
except:
return False
days_from = safe(lambda: extractDaysFrom(dayMatch))
today = safe(lambda: dayMatch.group(3) in self.__todayMatches__)
tomorrow = safe(lambda: dayMatch.group(3)
in self.__tomorrowMatches__)
next_week = safe(lambda: dayMatch.group(4) == 'next')
day_of_week = safe(lambda: extractDayOfWeek(dayMatch))
month = safe(lambda: extractMonth(dayMatch))
day = safe(lambda: extractDay(dayMatch))
# Convert extracted terms to datetime object
if not dayMatch:
return None
elif today:
d = self.now
elif tomorrow:
d = self.now + datetime.timedelta(days=1)
elif type(day_of_week) == int:
current_day_of_week = self.now.weekday()
num_days_away = (day_of_week - current_day_of_week) % 7
if next_week:
num_days_away += 7
d = self.now + \
datetime.timedelta(days=num_days_away)
elif month and day:
d = datetime.datetime(
self.now.year, month, day,
self.now.hour, self.now.minute)
if days_from:
d += datetime.timedelta(days=days_from)
return d
matches = self._dayRegex.finditer(inp)
return [handleMatch(dayMatch) for dayMatch in matches]
def extractDay(self, inp):
"""Returns the first time-related date found in the input string,
or None if not found."""
day = self.extractDay(inp)
if day:
return day[0]
return None
def extractTimes(self, inp):
"""Extracts time-related information from an input string.
Ignores any information related to the specific date, focusing
on the time-of-day.
Args:
inp (str): Input string to be parsed.
Returns:
A list of datetime objects containing the extracted times from the
input snippet, or an empty list if none found.
"""
def handleMatch(time):
relative = False
if not time:
return None
# Default times: 8am, 12pm, 7pm
elif time.group(1) == 'morning':
h = 8
m = 0
elif time.group(1) == 'afternoon':
h = 12
m = 0
elif time.group(1) == 'evening':
h = 19
m = 0
elif time.group(4) and time.group(5):
h, m = 0, 0
# Extract hours difference
converter = NumberService()
try:
diff = converter.parse(time.group(4))
except:
return None
if time.group(5) == 'hours':
h += diff
else:
m += diff
# Extract minutes difference
if time.group(6):
converter = NumberService()
try:
diff = converter.parse(time.group(7))
except:
return None
if time.group(8) == 'hours':
h += diff
else:
m += diff
relative = True
else:
# Convert from "HH:MM pm" format
t = time.group(2)
h, m = int(t.split(':')[0]) % 12, int(t.split(':')[1])
try:
if time.group(3) == 'pm':
h += 12
except IndexError:
pass
if relative:
return self.now + datetime.timedelta(hours=h, minutes=m)
else:
return datetime.datetime(
self.now.year, self.now.month, self.now.day, h, m
)
inp = self._preprocess(inp)
return [handleMatch(time) for time in self._timeRegex.finditer(inp)]
def extractTime(self, inp):
"""Returns the first time-related date found in the input string,
or None if not found."""
times = self.extractTimes(inp)
if times:
return times[0]
return None
def extractDate(self, inp):
"""Returns the first date found in the input string, or None if not
found."""
dates = self.extractDates(inp)
for date in dates:
return date
return None
def convertDay(self, day, prefix="", weekday=False):
"""Convert a datetime object representing a day into a human-ready
string that can be read, spoken aloud, etc.
Args:
day (datetime.date): A datetime object to be converted into text.
prefix (str): An optional argument that prefixes the converted
string. For example, if prefix="in", you'd receive "in two
days", rather than "two days", while the method would still
return "tomorrow" (rather than "in tomorrow").
weekday (bool): An optional argument that returns "Monday, Oct. 1"
if True, rather than "Oct. 1".
Returns:
A string representation of the input day, ignoring any time-related
information.
"""
def sameDay(d1, d2):
d = d1.day == d2.day
m = d1.month == d2.month
y = d1.year == d2.year
return d and m and y
tom = self.now + datetime.timedelta(days=1)
if sameDay(day, self.now):
return "today"
elif sameDay(day, tom):
return "tomorrow"
if weekday:
dayString = day.strftime("%A, %B %d")
else:
dayString = day.strftime("%B %d")
# Ex) Remove '0' from 'August 03'
if not int(dayString[-2]):
dayString = dayString[:-2] + dayString[-1]
return prefix + " " + dayString
def convertTime(self, time):
"""Convert a datetime object representing a time into a human-ready
string that can be read, spoken aloud, etc.
Args:
time (datetime.date): A datetime object to be converted into text.
Returns:
A string representation of the input time, ignoring any day-related
information.
"""
# if ':00', ignore reporting minutes
m_format = ""
if time.minute:
m_format = ":%M"
timeString = time.strftime("%I" + m_format + " %p")
# if '07:30', cast to '7:30'
if not int(timeString[0]):
timeString = timeString[1:]
return timeString
def convertDate(self, date, prefix="", weekday=False):
"""Convert a datetime object representing into a human-ready
string that can be read, spoken aloud, etc. In effect, runs
both convertDay and convertTime on the input, merging the results.
Args:
date (datetime.date): A datetime object to be converted into text.
prefix (str): An optional argument that prefixes the converted
string. For example, if prefix="in", you'd receive "in two
days", rather than "two days", while the method would still
return "tomorrow" (rather than "in tomorrow").
weekday (bool): An optional argument that returns "Monday, Oct. 1"
if True, rather than "Oct. 1".
Returns:
A string representation of the input day and time.
"""
dayString = self.convertDay(
date, prefix=prefix, weekday=weekday)
timeString = self.convertTime(date)
return dayString + " at " + timeString
|
crm416/semantic | semantic/dates.py | DateService.extractDate | python | def extractDate(self, inp):
dates = self.extractDates(inp)
for date in dates:
return date
return None | Returns the first date found in the input string, or None if not
found. | train | https://github.com/crm416/semantic/blob/46deb8fefb3ea58aad2fedc8d0d62f3ee254b8fe/semantic/dates.py#L380-L386 | [
"def extractDates(self, inp):\n \"\"\"Extract semantic date information from an input string.\n In effect, runs both parseDay and parseTime on the input\n string and merges the results to produce a comprehensive\n datetime object.\n\n Args:\n inp (str): Input string to be parsed.\n\n Returns:\n A list of datetime objects containing the extracted dates from the\n input snippet, or an empty list if not found.\n \"\"\"\n def merge(param):\n day, time = param\n if not (day or time):\n return None\n\n if not day:\n return time\n if not time:\n return day\n\n return datetime.datetime(\n day.year, day.month, day.day, time.hour, time.minute\n )\n\n days = self.extractDays(inp)\n times = self.extractTimes(inp)\n return map(merge, zip_longest(days, times, fillvalue=None))\n"
] | class DateService(object):
"""Initialize a DateService for extracting dates from text.
Args:
tz: An optional Pytz timezone. All datetime objects returned will
be relative to the supplied timezone, or timezone-less if none
is supplied.
now: The time to which all returned datetime objects should be
relative. For example, if the text is "In 5 hours", the
datetime returned will be now + datetime.timedelta(hours=5).
Uses datetime.datetime.now() if none is supplied.
Returns:
A DateService which uses tz and now for all of its computations.
"""
def __init__(self, tz=None, now=None):
self.tz = tz
if now:
self.now = now
else:
self.now = datetime.datetime.now(tz=self.tz)
__months__ = ['january', 'february', 'march', 'april', 'may', 'june',
'july', 'august', 'september', 'october', 'november',
'december']
__shortMonths__ = ['jan', 'feb', 'mar', 'apr', 'may',
'jun', 'jul', 'aug', 'sept', 'oct', 'nov', 'dec']
__daysOfWeek__ = ['monday', 'tuesday', 'wednesday',
'thursday', 'friday', 'saturday', 'sunday']
__relativeDates__ = ['tomorrow', 'tonight', 'next']
__todayMatches__ = ['tonight', 'today', 'this morning',
'this evening', 'this afternoon']
__tomorrowMatches__ = ['tomorrow', 'next morning',
'next evening', 'next afternoon']
__dateDescriptors__ = {
'one': 1,
'first': 1,
'two': 2,
'second': 2,
'three': 3,
'third': 3,
'four': 4,
'fourth': 4,
'five': 5,
'fifth': 5,
'six': 6,
'sixth': 6,
'seven': 7,
'seventh': 7,
'eight': 8,
'eighth': 8,
'nine': 9,
'ninth': 9,
'ten': 10,
'tenth': 10,
'eleven': 11,
'eleventh': 11,
'twelve': 12,
'twelth': 12,
'thirteen': 13,
'thirteenth': 13,
'fourteen': 14,
'fourteenth': 14,
'fifteen': 15,
'fifteenth': 15,
'sixteen': 16,
'sixteenth': 16,
'seventeen': 17,
'seventeenth': 17,
'eighteen': 18,
'eighteenth': 18,
'nineteen': 19,
'nineteenth': 19,
'twenty': 20,
'twentieth': 20,
'twenty one': 21,
'twenty first': 21,
'twenty two': 22,
'twenty second': 22,
'twenty three': 23,
'twenty third': 23,
'twenty four': 24,
'twenty fourth': 24,
'twenty five': 25,
'twenty fifth': 25,
'twenty six': 26,
'twenty sixth': 26,
'twenty seven': 27,
'twenty seventh': 27,
'twenty eight': 28,
'twenty eighth': 28,
'twenty nine': 29,
'twenty ninth': 29,
'thirty': 30,
'thirtieth': 30,
'thirty one': 31,
'thirty first': 31
}
_dayRegex = re.compile(
r"""(?ix)
((week|day)s?\ from\ )?
(
tomorrow
|tonight
|today
|(next|this)[\ \b](morning|afternoon|evening|Monday|Tuesday|Wednesday|Thursday|Friday|Saturday|Sunday)
|(Monday|Tuesday|Wednesday|Thursday|Friday|Saturday|Sunday)
|(Jan(?:uary)?|Feb(?:ruary)?|Mar(?:ch)?|Apr(?:il)?|May|June?|July?|Aug(?:ust)?|Sept(?:ember)?|Oct(?:ober)?|Nov(?:ember)?|Dec(?:ember)?)\ (\w+)((\s|\-)?\w*)
)
""")
_timeRegex = re.compile(
r"""(?ix)
.*?
(
morning
|afternoon
|evening
|(\d{1,2}\:\d{2})\ ?(am|pm)?
|in\ (.+?)\ (hours|minutes)(\ (?:and\ )?(.+?)\ (hours|minutes))?
)
.*?""")
def _preprocess(self, inp):
return inp.replace('-', ' ').lower()
def extractDays(self, inp):
"""Extracts all day-related information from an input string.
Ignores any information related to the specific time-of-day.
Args:
inp (str): Input string to be parsed.
Returns:
A list of datetime objects containing the extracted date from the
input snippet, or an empty list if none found.
"""
inp = self._preprocess(inp)
def extractDayOfWeek(dayMatch):
if dayMatch.group(5) in self.__daysOfWeek__:
return self.__daysOfWeek__.index(dayMatch.group(5))
elif dayMatch.group(6) in self.__daysOfWeek__:
return self.__daysOfWeek__.index(dayMatch.group(6))
def extractMonth(dayMatch):
if dayMatch.group(7) in self.__months__:
return self.__months__.index(dayMatch.group(7)) + 1
elif dayMatch.group(7) in self.__shortMonths__:
return self.__shortMonths__.index(dayMatch.group(7)) + 1
def extractDay(dayMatch):
combined = dayMatch.group(8) + dayMatch.group(9)
if combined in self.__dateDescriptors__:
return self.__dateDescriptors__[combined]
elif dayMatch.group(8) in self.__dateDescriptors__:
return self.__dateDescriptors__[dayMatch.group(8)]
elif int(dayMatch.group(8)) in self.__dateDescriptors__.values():
return int(dayMatch.group(8))
def extractDaysFrom(dayMatch):
if not dayMatch.group(1):
return 0
def numericalPrefix(dayMatch):
# Grab 'three' of 'three weeks from'
prefix = inp.split(dayMatch.group(1))[0].strip().split(' ')
prefix.reverse()
prefix = list(filter(lambda s: s != 'and', prefix))
# Generate best guess number
service = NumberService()
num = prefix[0]
if service.isValid(num):
for n in prefix[1:]:
inc = n + " " + num
if service.isValid(inc):
num = inc
else:
break
return service.parse(num)
return 1
factor = numericalPrefix(dayMatch)
if dayMatch.group(2) == 'week':
return factor * 7
elif dayMatch.group(2) == 'day':
return factor * 1
def handleMatch(dayMatch):
def safe(exp):
"""For safe evaluation of regex groups"""
try:
return exp()
except:
return False
days_from = safe(lambda: extractDaysFrom(dayMatch))
today = safe(lambda: dayMatch.group(3) in self.__todayMatches__)
tomorrow = safe(lambda: dayMatch.group(3)
in self.__tomorrowMatches__)
next_week = safe(lambda: dayMatch.group(4) == 'next')
day_of_week = safe(lambda: extractDayOfWeek(dayMatch))
month = safe(lambda: extractMonth(dayMatch))
day = safe(lambda: extractDay(dayMatch))
# Convert extracted terms to datetime object
if not dayMatch:
return None
elif today:
d = self.now
elif tomorrow:
d = self.now + datetime.timedelta(days=1)
elif type(day_of_week) == int:
current_day_of_week = self.now.weekday()
num_days_away = (day_of_week - current_day_of_week) % 7
if next_week:
num_days_away += 7
d = self.now + \
datetime.timedelta(days=num_days_away)
elif month and day:
d = datetime.datetime(
self.now.year, month, day,
self.now.hour, self.now.minute)
if days_from:
d += datetime.timedelta(days=days_from)
return d
matches = self._dayRegex.finditer(inp)
return [handleMatch(dayMatch) for dayMatch in matches]
def extractDay(self, inp):
"""Returns the first time-related date found in the input string,
or None if not found."""
day = self.extractDay(inp)
if day:
return day[0]
return None
def extractTimes(self, inp):
"""Extracts time-related information from an input string.
Ignores any information related to the specific date, focusing
on the time-of-day.
Args:
inp (str): Input string to be parsed.
Returns:
A list of datetime objects containing the extracted times from the
input snippet, or an empty list if none found.
"""
def handleMatch(time):
relative = False
if not time:
return None
# Default times: 8am, 12pm, 7pm
elif time.group(1) == 'morning':
h = 8
m = 0
elif time.group(1) == 'afternoon':
h = 12
m = 0
elif time.group(1) == 'evening':
h = 19
m = 0
elif time.group(4) and time.group(5):
h, m = 0, 0
# Extract hours difference
converter = NumberService()
try:
diff = converter.parse(time.group(4))
except:
return None
if time.group(5) == 'hours':
h += diff
else:
m += diff
# Extract minutes difference
if time.group(6):
converter = NumberService()
try:
diff = converter.parse(time.group(7))
except:
return None
if time.group(8) == 'hours':
h += diff
else:
m += diff
relative = True
else:
# Convert from "HH:MM pm" format
t = time.group(2)
h, m = int(t.split(':')[0]) % 12, int(t.split(':')[1])
try:
if time.group(3) == 'pm':
h += 12
except IndexError:
pass
if relative:
return self.now + datetime.timedelta(hours=h, minutes=m)
else:
return datetime.datetime(
self.now.year, self.now.month, self.now.day, h, m
)
inp = self._preprocess(inp)
return [handleMatch(time) for time in self._timeRegex.finditer(inp)]
def extractTime(self, inp):
"""Returns the first time-related date found in the input string,
or None if not found."""
times = self.extractTimes(inp)
if times:
return times[0]
return None
def extractDates(self, inp):
"""Extract semantic date information from an input string.
In effect, runs both parseDay and parseTime on the input
string and merges the results to produce a comprehensive
datetime object.
Args:
inp (str): Input string to be parsed.
Returns:
A list of datetime objects containing the extracted dates from the
input snippet, or an empty list if not found.
"""
def merge(param):
day, time = param
if not (day or time):
return None
if not day:
return time
if not time:
return day
return datetime.datetime(
day.year, day.month, day.day, time.hour, time.minute
)
days = self.extractDays(inp)
times = self.extractTimes(inp)
return map(merge, zip_longest(days, times, fillvalue=None))
def convertDay(self, day, prefix="", weekday=False):
"""Convert a datetime object representing a day into a human-ready
string that can be read, spoken aloud, etc.
Args:
day (datetime.date): A datetime object to be converted into text.
prefix (str): An optional argument that prefixes the converted
string. For example, if prefix="in", you'd receive "in two
days", rather than "two days", while the method would still
return "tomorrow" (rather than "in tomorrow").
weekday (bool): An optional argument that returns "Monday, Oct. 1"
if True, rather than "Oct. 1".
Returns:
A string representation of the input day, ignoring any time-related
information.
"""
def sameDay(d1, d2):
d = d1.day == d2.day
m = d1.month == d2.month
y = d1.year == d2.year
return d and m and y
tom = self.now + datetime.timedelta(days=1)
if sameDay(day, self.now):
return "today"
elif sameDay(day, tom):
return "tomorrow"
if weekday:
dayString = day.strftime("%A, %B %d")
else:
dayString = day.strftime("%B %d")
# Ex) Remove '0' from 'August 03'
if not int(dayString[-2]):
dayString = dayString[:-2] + dayString[-1]
return prefix + " " + dayString
def convertTime(self, time):
"""Convert a datetime object representing a time into a human-ready
string that can be read, spoken aloud, etc.
Args:
time (datetime.date): A datetime object to be converted into text.
Returns:
A string representation of the input time, ignoring any day-related
information.
"""
# if ':00', ignore reporting minutes
m_format = ""
if time.minute:
m_format = ":%M"
timeString = time.strftime("%I" + m_format + " %p")
# if '07:30', cast to '7:30'
if not int(timeString[0]):
timeString = timeString[1:]
return timeString
def convertDate(self, date, prefix="", weekday=False):
"""Convert a datetime object representing into a human-ready
string that can be read, spoken aloud, etc. In effect, runs
both convertDay and convertTime on the input, merging the results.
Args:
date (datetime.date): A datetime object to be converted into text.
prefix (str): An optional argument that prefixes the converted
string. For example, if prefix="in", you'd receive "in two
days", rather than "two days", while the method would still
return "tomorrow" (rather than "in tomorrow").
weekday (bool): An optional argument that returns "Monday, Oct. 1"
if True, rather than "Oct. 1".
Returns:
A string representation of the input day and time.
"""
dayString = self.convertDay(
date, prefix=prefix, weekday=weekday)
timeString = self.convertTime(date)
return dayString + " at " + timeString
|
crm416/semantic | semantic/dates.py | DateService.convertDay | python | def convertDay(self, day, prefix="", weekday=False):
def sameDay(d1, d2):
d = d1.day == d2.day
m = d1.month == d2.month
y = d1.year == d2.year
return d and m and y
tom = self.now + datetime.timedelta(days=1)
if sameDay(day, self.now):
return "today"
elif sameDay(day, tom):
return "tomorrow"
if weekday:
dayString = day.strftime("%A, %B %d")
else:
dayString = day.strftime("%B %d")
# Ex) Remove '0' from 'August 03'
if not int(dayString[-2]):
dayString = dayString[:-2] + dayString[-1]
return prefix + " " + dayString | Convert a datetime object representing a day into a human-ready
string that can be read, spoken aloud, etc.
Args:
day (datetime.date): A datetime object to be converted into text.
prefix (str): An optional argument that prefixes the converted
string. For example, if prefix="in", you'd receive "in two
days", rather than "two days", while the method would still
return "tomorrow" (rather than "in tomorrow").
weekday (bool): An optional argument that returns "Monday, Oct. 1"
if True, rather than "Oct. 1".
Returns:
A string representation of the input day, ignoring any time-related
information. | train | https://github.com/crm416/semantic/blob/46deb8fefb3ea58aad2fedc8d0d62f3ee254b8fe/semantic/dates.py#L388-L427 | [
"def sameDay(d1, d2):\n d = d1.day == d2.day\n m = d1.month == d2.month\n y = d1.year == d2.year\n return d and m and y\n"
] | class DateService(object):
"""Initialize a DateService for extracting dates from text.
Args:
tz: An optional Pytz timezone. All datetime objects returned will
be relative to the supplied timezone, or timezone-less if none
is supplied.
now: The time to which all returned datetime objects should be
relative. For example, if the text is "In 5 hours", the
datetime returned will be now + datetime.timedelta(hours=5).
Uses datetime.datetime.now() if none is supplied.
Returns:
A DateService which uses tz and now for all of its computations.
"""
def __init__(self, tz=None, now=None):
self.tz = tz
if now:
self.now = now
else:
self.now = datetime.datetime.now(tz=self.tz)
__months__ = ['january', 'february', 'march', 'april', 'may', 'june',
'july', 'august', 'september', 'october', 'november',
'december']
__shortMonths__ = ['jan', 'feb', 'mar', 'apr', 'may',
'jun', 'jul', 'aug', 'sept', 'oct', 'nov', 'dec']
__daysOfWeek__ = ['monday', 'tuesday', 'wednesday',
'thursday', 'friday', 'saturday', 'sunday']
__relativeDates__ = ['tomorrow', 'tonight', 'next']
__todayMatches__ = ['tonight', 'today', 'this morning',
'this evening', 'this afternoon']
__tomorrowMatches__ = ['tomorrow', 'next morning',
'next evening', 'next afternoon']
__dateDescriptors__ = {
'one': 1,
'first': 1,
'two': 2,
'second': 2,
'three': 3,
'third': 3,
'four': 4,
'fourth': 4,
'five': 5,
'fifth': 5,
'six': 6,
'sixth': 6,
'seven': 7,
'seventh': 7,
'eight': 8,
'eighth': 8,
'nine': 9,
'ninth': 9,
'ten': 10,
'tenth': 10,
'eleven': 11,
'eleventh': 11,
'twelve': 12,
'twelth': 12,
'thirteen': 13,
'thirteenth': 13,
'fourteen': 14,
'fourteenth': 14,
'fifteen': 15,
'fifteenth': 15,
'sixteen': 16,
'sixteenth': 16,
'seventeen': 17,
'seventeenth': 17,
'eighteen': 18,
'eighteenth': 18,
'nineteen': 19,
'nineteenth': 19,
'twenty': 20,
'twentieth': 20,
'twenty one': 21,
'twenty first': 21,
'twenty two': 22,
'twenty second': 22,
'twenty three': 23,
'twenty third': 23,
'twenty four': 24,
'twenty fourth': 24,
'twenty five': 25,
'twenty fifth': 25,
'twenty six': 26,
'twenty sixth': 26,
'twenty seven': 27,
'twenty seventh': 27,
'twenty eight': 28,
'twenty eighth': 28,
'twenty nine': 29,
'twenty ninth': 29,
'thirty': 30,
'thirtieth': 30,
'thirty one': 31,
'thirty first': 31
}
_dayRegex = re.compile(
r"""(?ix)
((week|day)s?\ from\ )?
(
tomorrow
|tonight
|today
|(next|this)[\ \b](morning|afternoon|evening|Monday|Tuesday|Wednesday|Thursday|Friday|Saturday|Sunday)
|(Monday|Tuesday|Wednesday|Thursday|Friday|Saturday|Sunday)
|(Jan(?:uary)?|Feb(?:ruary)?|Mar(?:ch)?|Apr(?:il)?|May|June?|July?|Aug(?:ust)?|Sept(?:ember)?|Oct(?:ober)?|Nov(?:ember)?|Dec(?:ember)?)\ (\w+)((\s|\-)?\w*)
)
""")
_timeRegex = re.compile(
r"""(?ix)
.*?
(
morning
|afternoon
|evening
|(\d{1,2}\:\d{2})\ ?(am|pm)?
|in\ (.+?)\ (hours|minutes)(\ (?:and\ )?(.+?)\ (hours|minutes))?
)
.*?""")
def _preprocess(self, inp):
return inp.replace('-', ' ').lower()
def extractDays(self, inp):
"""Extracts all day-related information from an input string.
Ignores any information related to the specific time-of-day.
Args:
inp (str): Input string to be parsed.
Returns:
A list of datetime objects containing the extracted date from the
input snippet, or an empty list if none found.
"""
inp = self._preprocess(inp)
def extractDayOfWeek(dayMatch):
if dayMatch.group(5) in self.__daysOfWeek__:
return self.__daysOfWeek__.index(dayMatch.group(5))
elif dayMatch.group(6) in self.__daysOfWeek__:
return self.__daysOfWeek__.index(dayMatch.group(6))
def extractMonth(dayMatch):
if dayMatch.group(7) in self.__months__:
return self.__months__.index(dayMatch.group(7)) + 1
elif dayMatch.group(7) in self.__shortMonths__:
return self.__shortMonths__.index(dayMatch.group(7)) + 1
def extractDay(dayMatch):
combined = dayMatch.group(8) + dayMatch.group(9)
if combined in self.__dateDescriptors__:
return self.__dateDescriptors__[combined]
elif dayMatch.group(8) in self.__dateDescriptors__:
return self.__dateDescriptors__[dayMatch.group(8)]
elif int(dayMatch.group(8)) in self.__dateDescriptors__.values():
return int(dayMatch.group(8))
def extractDaysFrom(dayMatch):
if not dayMatch.group(1):
return 0
def numericalPrefix(dayMatch):
# Grab 'three' of 'three weeks from'
prefix = inp.split(dayMatch.group(1))[0].strip().split(' ')
prefix.reverse()
prefix = list(filter(lambda s: s != 'and', prefix))
# Generate best guess number
service = NumberService()
num = prefix[0]
if service.isValid(num):
for n in prefix[1:]:
inc = n + " " + num
if service.isValid(inc):
num = inc
else:
break
return service.parse(num)
return 1
factor = numericalPrefix(dayMatch)
if dayMatch.group(2) == 'week':
return factor * 7
elif dayMatch.group(2) == 'day':
return factor * 1
def handleMatch(dayMatch):
def safe(exp):
"""For safe evaluation of regex groups"""
try:
return exp()
except:
return False
days_from = safe(lambda: extractDaysFrom(dayMatch))
today = safe(lambda: dayMatch.group(3) in self.__todayMatches__)
tomorrow = safe(lambda: dayMatch.group(3)
in self.__tomorrowMatches__)
next_week = safe(lambda: dayMatch.group(4) == 'next')
day_of_week = safe(lambda: extractDayOfWeek(dayMatch))
month = safe(lambda: extractMonth(dayMatch))
day = safe(lambda: extractDay(dayMatch))
# Convert extracted terms to datetime object
if not dayMatch:
return None
elif today:
d = self.now
elif tomorrow:
d = self.now + datetime.timedelta(days=1)
elif type(day_of_week) == int:
current_day_of_week = self.now.weekday()
num_days_away = (day_of_week - current_day_of_week) % 7
if next_week:
num_days_away += 7
d = self.now + \
datetime.timedelta(days=num_days_away)
elif month and day:
d = datetime.datetime(
self.now.year, month, day,
self.now.hour, self.now.minute)
if days_from:
d += datetime.timedelta(days=days_from)
return d
matches = self._dayRegex.finditer(inp)
return [handleMatch(dayMatch) for dayMatch in matches]
def extractDay(self, inp):
"""Returns the first time-related date found in the input string,
or None if not found."""
day = self.extractDay(inp)
if day:
return day[0]
return None
def extractTimes(self, inp):
"""Extracts time-related information from an input string.
Ignores any information related to the specific date, focusing
on the time-of-day.
Args:
inp (str): Input string to be parsed.
Returns:
A list of datetime objects containing the extracted times from the
input snippet, or an empty list if none found.
"""
def handleMatch(time):
relative = False
if not time:
return None
# Default times: 8am, 12pm, 7pm
elif time.group(1) == 'morning':
h = 8
m = 0
elif time.group(1) == 'afternoon':
h = 12
m = 0
elif time.group(1) == 'evening':
h = 19
m = 0
elif time.group(4) and time.group(5):
h, m = 0, 0
# Extract hours difference
converter = NumberService()
try:
diff = converter.parse(time.group(4))
except:
return None
if time.group(5) == 'hours':
h += diff
else:
m += diff
# Extract minutes difference
if time.group(6):
converter = NumberService()
try:
diff = converter.parse(time.group(7))
except:
return None
if time.group(8) == 'hours':
h += diff
else:
m += diff
relative = True
else:
# Convert from "HH:MM pm" format
t = time.group(2)
h, m = int(t.split(':')[0]) % 12, int(t.split(':')[1])
try:
if time.group(3) == 'pm':
h += 12
except IndexError:
pass
if relative:
return self.now + datetime.timedelta(hours=h, minutes=m)
else:
return datetime.datetime(
self.now.year, self.now.month, self.now.day, h, m
)
inp = self._preprocess(inp)
return [handleMatch(time) for time in self._timeRegex.finditer(inp)]
def extractTime(self, inp):
"""Returns the first time-related date found in the input string,
or None if not found."""
times = self.extractTimes(inp)
if times:
return times[0]
return None
def extractDates(self, inp):
"""Extract semantic date information from an input string.
In effect, runs both parseDay and parseTime on the input
string and merges the results to produce a comprehensive
datetime object.
Args:
inp (str): Input string to be parsed.
Returns:
A list of datetime objects containing the extracted dates from the
input snippet, or an empty list if not found.
"""
def merge(param):
day, time = param
if not (day or time):
return None
if not day:
return time
if not time:
return day
return datetime.datetime(
day.year, day.month, day.day, time.hour, time.minute
)
days = self.extractDays(inp)
times = self.extractTimes(inp)
return map(merge, zip_longest(days, times, fillvalue=None))
def extractDate(self, inp):
"""Returns the first date found in the input string, or None if not
found."""
dates = self.extractDates(inp)
for date in dates:
return date
return None
def convertTime(self, time):
"""Convert a datetime object representing a time into a human-ready
string that can be read, spoken aloud, etc.
Args:
time (datetime.date): A datetime object to be converted into text.
Returns:
A string representation of the input time, ignoring any day-related
information.
"""
# if ':00', ignore reporting minutes
m_format = ""
if time.minute:
m_format = ":%M"
timeString = time.strftime("%I" + m_format + " %p")
# if '07:30', cast to '7:30'
if not int(timeString[0]):
timeString = timeString[1:]
return timeString
def convertDate(self, date, prefix="", weekday=False):
"""Convert a datetime object representing into a human-ready
string that can be read, spoken aloud, etc. In effect, runs
both convertDay and convertTime on the input, merging the results.
Args:
date (datetime.date): A datetime object to be converted into text.
prefix (str): An optional argument that prefixes the converted
string. For example, if prefix="in", you'd receive "in two
days", rather than "two days", while the method would still
return "tomorrow" (rather than "in tomorrow").
weekday (bool): An optional argument that returns "Monday, Oct. 1"
if True, rather than "Oct. 1".
Returns:
A string representation of the input day and time.
"""
dayString = self.convertDay(
date, prefix=prefix, weekday=weekday)
timeString = self.convertTime(date)
return dayString + " at " + timeString
|
crm416/semantic | semantic/dates.py | DateService.convertTime | python | def convertTime(self, time):
# if ':00', ignore reporting minutes
m_format = ""
if time.minute:
m_format = ":%M"
timeString = time.strftime("%I" + m_format + " %p")
# if '07:30', cast to '7:30'
if not int(timeString[0]):
timeString = timeString[1:]
return timeString | Convert a datetime object representing a time into a human-ready
string that can be read, spoken aloud, etc.
Args:
time (datetime.date): A datetime object to be converted into text.
Returns:
A string representation of the input time, ignoring any day-related
information. | train | https://github.com/crm416/semantic/blob/46deb8fefb3ea58aad2fedc8d0d62f3ee254b8fe/semantic/dates.py#L429-L451 | null | class DateService(object):
"""Initialize a DateService for extracting dates from text.
Args:
tz: An optional Pytz timezone. All datetime objects returned will
be relative to the supplied timezone, or timezone-less if none
is supplied.
now: The time to which all returned datetime objects should be
relative. For example, if the text is "In 5 hours", the
datetime returned will be now + datetime.timedelta(hours=5).
Uses datetime.datetime.now() if none is supplied.
Returns:
A DateService which uses tz and now for all of its computations.
"""
def __init__(self, tz=None, now=None):
self.tz = tz
if now:
self.now = now
else:
self.now = datetime.datetime.now(tz=self.tz)
__months__ = ['january', 'february', 'march', 'april', 'may', 'june',
'july', 'august', 'september', 'october', 'november',
'december']
__shortMonths__ = ['jan', 'feb', 'mar', 'apr', 'may',
'jun', 'jul', 'aug', 'sept', 'oct', 'nov', 'dec']
__daysOfWeek__ = ['monday', 'tuesday', 'wednesday',
'thursday', 'friday', 'saturday', 'sunday']
__relativeDates__ = ['tomorrow', 'tonight', 'next']
__todayMatches__ = ['tonight', 'today', 'this morning',
'this evening', 'this afternoon']
__tomorrowMatches__ = ['tomorrow', 'next morning',
'next evening', 'next afternoon']
__dateDescriptors__ = {
'one': 1,
'first': 1,
'two': 2,
'second': 2,
'three': 3,
'third': 3,
'four': 4,
'fourth': 4,
'five': 5,
'fifth': 5,
'six': 6,
'sixth': 6,
'seven': 7,
'seventh': 7,
'eight': 8,
'eighth': 8,
'nine': 9,
'ninth': 9,
'ten': 10,
'tenth': 10,
'eleven': 11,
'eleventh': 11,
'twelve': 12,
'twelth': 12,
'thirteen': 13,
'thirteenth': 13,
'fourteen': 14,
'fourteenth': 14,
'fifteen': 15,
'fifteenth': 15,
'sixteen': 16,
'sixteenth': 16,
'seventeen': 17,
'seventeenth': 17,
'eighteen': 18,
'eighteenth': 18,
'nineteen': 19,
'nineteenth': 19,
'twenty': 20,
'twentieth': 20,
'twenty one': 21,
'twenty first': 21,
'twenty two': 22,
'twenty second': 22,
'twenty three': 23,
'twenty third': 23,
'twenty four': 24,
'twenty fourth': 24,
'twenty five': 25,
'twenty fifth': 25,
'twenty six': 26,
'twenty sixth': 26,
'twenty seven': 27,
'twenty seventh': 27,
'twenty eight': 28,
'twenty eighth': 28,
'twenty nine': 29,
'twenty ninth': 29,
'thirty': 30,
'thirtieth': 30,
'thirty one': 31,
'thirty first': 31
}
_dayRegex = re.compile(
r"""(?ix)
((week|day)s?\ from\ )?
(
tomorrow
|tonight
|today
|(next|this)[\ \b](morning|afternoon|evening|Monday|Tuesday|Wednesday|Thursday|Friday|Saturday|Sunday)
|(Monday|Tuesday|Wednesday|Thursday|Friday|Saturday|Sunday)
|(Jan(?:uary)?|Feb(?:ruary)?|Mar(?:ch)?|Apr(?:il)?|May|June?|July?|Aug(?:ust)?|Sept(?:ember)?|Oct(?:ober)?|Nov(?:ember)?|Dec(?:ember)?)\ (\w+)((\s|\-)?\w*)
)
""")
_timeRegex = re.compile(
r"""(?ix)
.*?
(
morning
|afternoon
|evening
|(\d{1,2}\:\d{2})\ ?(am|pm)?
|in\ (.+?)\ (hours|minutes)(\ (?:and\ )?(.+?)\ (hours|minutes))?
)
.*?""")
def _preprocess(self, inp):
return inp.replace('-', ' ').lower()
def extractDays(self, inp):
"""Extracts all day-related information from an input string.
Ignores any information related to the specific time-of-day.
Args:
inp (str): Input string to be parsed.
Returns:
A list of datetime objects containing the extracted date from the
input snippet, or an empty list if none found.
"""
inp = self._preprocess(inp)
def extractDayOfWeek(dayMatch):
if dayMatch.group(5) in self.__daysOfWeek__:
return self.__daysOfWeek__.index(dayMatch.group(5))
elif dayMatch.group(6) in self.__daysOfWeek__:
return self.__daysOfWeek__.index(dayMatch.group(6))
def extractMonth(dayMatch):
if dayMatch.group(7) in self.__months__:
return self.__months__.index(dayMatch.group(7)) + 1
elif dayMatch.group(7) in self.__shortMonths__:
return self.__shortMonths__.index(dayMatch.group(7)) + 1
def extractDay(dayMatch):
combined = dayMatch.group(8) + dayMatch.group(9)
if combined in self.__dateDescriptors__:
return self.__dateDescriptors__[combined]
elif dayMatch.group(8) in self.__dateDescriptors__:
return self.__dateDescriptors__[dayMatch.group(8)]
elif int(dayMatch.group(8)) in self.__dateDescriptors__.values():
return int(dayMatch.group(8))
def extractDaysFrom(dayMatch):
if not dayMatch.group(1):
return 0
def numericalPrefix(dayMatch):
# Grab 'three' of 'three weeks from'
prefix = inp.split(dayMatch.group(1))[0].strip().split(' ')
prefix.reverse()
prefix = list(filter(lambda s: s != 'and', prefix))
# Generate best guess number
service = NumberService()
num = prefix[0]
if service.isValid(num):
for n in prefix[1:]:
inc = n + " " + num
if service.isValid(inc):
num = inc
else:
break
return service.parse(num)
return 1
factor = numericalPrefix(dayMatch)
if dayMatch.group(2) == 'week':
return factor * 7
elif dayMatch.group(2) == 'day':
return factor * 1
def handleMatch(dayMatch):
def safe(exp):
"""For safe evaluation of regex groups"""
try:
return exp()
except:
return False
days_from = safe(lambda: extractDaysFrom(dayMatch))
today = safe(lambda: dayMatch.group(3) in self.__todayMatches__)
tomorrow = safe(lambda: dayMatch.group(3)
in self.__tomorrowMatches__)
next_week = safe(lambda: dayMatch.group(4) == 'next')
day_of_week = safe(lambda: extractDayOfWeek(dayMatch))
month = safe(lambda: extractMonth(dayMatch))
day = safe(lambda: extractDay(dayMatch))
# Convert extracted terms to datetime object
if not dayMatch:
return None
elif today:
d = self.now
elif tomorrow:
d = self.now + datetime.timedelta(days=1)
elif type(day_of_week) == int:
current_day_of_week = self.now.weekday()
num_days_away = (day_of_week - current_day_of_week) % 7
if next_week:
num_days_away += 7
d = self.now + \
datetime.timedelta(days=num_days_away)
elif month and day:
d = datetime.datetime(
self.now.year, month, day,
self.now.hour, self.now.minute)
if days_from:
d += datetime.timedelta(days=days_from)
return d
matches = self._dayRegex.finditer(inp)
return [handleMatch(dayMatch) for dayMatch in matches]
def extractDay(self, inp):
"""Returns the first time-related date found in the input string,
or None if not found."""
day = self.extractDay(inp)
if day:
return day[0]
return None
def extractTimes(self, inp):
"""Extracts time-related information from an input string.
Ignores any information related to the specific date, focusing
on the time-of-day.
Args:
inp (str): Input string to be parsed.
Returns:
A list of datetime objects containing the extracted times from the
input snippet, or an empty list if none found.
"""
def handleMatch(time):
relative = False
if not time:
return None
# Default times: 8am, 12pm, 7pm
elif time.group(1) == 'morning':
h = 8
m = 0
elif time.group(1) == 'afternoon':
h = 12
m = 0
elif time.group(1) == 'evening':
h = 19
m = 0
elif time.group(4) and time.group(5):
h, m = 0, 0
# Extract hours difference
converter = NumberService()
try:
diff = converter.parse(time.group(4))
except:
return None
if time.group(5) == 'hours':
h += diff
else:
m += diff
# Extract minutes difference
if time.group(6):
converter = NumberService()
try:
diff = converter.parse(time.group(7))
except:
return None
if time.group(8) == 'hours':
h += diff
else:
m += diff
relative = True
else:
# Convert from "HH:MM pm" format
t = time.group(2)
h, m = int(t.split(':')[0]) % 12, int(t.split(':')[1])
try:
if time.group(3) == 'pm':
h += 12
except IndexError:
pass
if relative:
return self.now + datetime.timedelta(hours=h, minutes=m)
else:
return datetime.datetime(
self.now.year, self.now.month, self.now.day, h, m
)
inp = self._preprocess(inp)
return [handleMatch(time) for time in self._timeRegex.finditer(inp)]
def extractTime(self, inp):
"""Returns the first time-related date found in the input string,
or None if not found."""
times = self.extractTimes(inp)
if times:
return times[0]
return None
def extractDates(self, inp):
"""Extract semantic date information from an input string.
In effect, runs both parseDay and parseTime on the input
string and merges the results to produce a comprehensive
datetime object.
Args:
inp (str): Input string to be parsed.
Returns:
A list of datetime objects containing the extracted dates from the
input snippet, or an empty list if not found.
"""
def merge(param):
day, time = param
if not (day or time):
return None
if not day:
return time
if not time:
return day
return datetime.datetime(
day.year, day.month, day.day, time.hour, time.minute
)
days = self.extractDays(inp)
times = self.extractTimes(inp)
return map(merge, zip_longest(days, times, fillvalue=None))
def extractDate(self, inp):
"""Returns the first date found in the input string, or None if not
found."""
dates = self.extractDates(inp)
for date in dates:
return date
return None
def convertDay(self, day, prefix="", weekday=False):
"""Convert a datetime object representing a day into a human-ready
string that can be read, spoken aloud, etc.
Args:
day (datetime.date): A datetime object to be converted into text.
prefix (str): An optional argument that prefixes the converted
string. For example, if prefix="in", you'd receive "in two
days", rather than "two days", while the method would still
return "tomorrow" (rather than "in tomorrow").
weekday (bool): An optional argument that returns "Monday, Oct. 1"
if True, rather than "Oct. 1".
Returns:
A string representation of the input day, ignoring any time-related
information.
"""
def sameDay(d1, d2):
d = d1.day == d2.day
m = d1.month == d2.month
y = d1.year == d2.year
return d and m and y
tom = self.now + datetime.timedelta(days=1)
if sameDay(day, self.now):
return "today"
elif sameDay(day, tom):
return "tomorrow"
if weekday:
dayString = day.strftime("%A, %B %d")
else:
dayString = day.strftime("%B %d")
# Ex) Remove '0' from 'August 03'
if not int(dayString[-2]):
dayString = dayString[:-2] + dayString[-1]
return prefix + " " + dayString
def convertDate(self, date, prefix="", weekday=False):
"""Convert a datetime object representing into a human-ready
string that can be read, spoken aloud, etc. In effect, runs
both convertDay and convertTime on the input, merging the results.
Args:
date (datetime.date): A datetime object to be converted into text.
prefix (str): An optional argument that prefixes the converted
string. For example, if prefix="in", you'd receive "in two
days", rather than "two days", while the method would still
return "tomorrow" (rather than "in tomorrow").
weekday (bool): An optional argument that returns "Monday, Oct. 1"
if True, rather than "Oct. 1".
Returns:
A string representation of the input day and time.
"""
dayString = self.convertDay(
date, prefix=prefix, weekday=weekday)
timeString = self.convertTime(date)
return dayString + " at " + timeString
|
crm416/semantic | semantic/dates.py | DateService.convertDate | python | def convertDate(self, date, prefix="", weekday=False):
dayString = self.convertDay(
date, prefix=prefix, weekday=weekday)
timeString = self.convertTime(date)
return dayString + " at " + timeString | Convert a datetime object representing into a human-ready
string that can be read, spoken aloud, etc. In effect, runs
both convertDay and convertTime on the input, merging the results.
Args:
date (datetime.date): A datetime object to be converted into text.
prefix (str): An optional argument that prefixes the converted
string. For example, if prefix="in", you'd receive "in two
days", rather than "two days", while the method would still
return "tomorrow" (rather than "in tomorrow").
weekday (bool): An optional argument that returns "Monday, Oct. 1"
if True, rather than "Oct. 1".
Returns:
A string representation of the input day and time. | train | https://github.com/crm416/semantic/blob/46deb8fefb3ea58aad2fedc8d0d62f3ee254b8fe/semantic/dates.py#L453-L473 | [
"def convertDay(self, day, prefix=\"\", weekday=False):\n \"\"\"Convert a datetime object representing a day into a human-ready\n string that can be read, spoken aloud, etc.\n\n Args:\n day (datetime.date): A datetime object to be converted into text.\n prefix (str): An optional argument that prefixes the converted\n string. For example, if prefix=\"in\", you'd receive \"in two\n days\", rather than \"two days\", while the method would still\n return \"tomorrow\" (rather than \"in tomorrow\").\n weekday (bool): An optional argument that returns \"Monday, Oct. 1\"\n if True, rather than \"Oct. 1\".\n\n Returns:\n A string representation of the input day, ignoring any time-related\n information.\n \"\"\"\n def sameDay(d1, d2):\n d = d1.day == d2.day\n m = d1.month == d2.month\n y = d1.year == d2.year\n return d and m and y\n\n tom = self.now + datetime.timedelta(days=1)\n\n if sameDay(day, self.now):\n return \"today\"\n elif sameDay(day, tom):\n return \"tomorrow\"\n\n if weekday:\n dayString = day.strftime(\"%A, %B %d\")\n else:\n dayString = day.strftime(\"%B %d\")\n\n # Ex) Remove '0' from 'August 03'\n if not int(dayString[-2]):\n dayString = dayString[:-2] + dayString[-1]\n\n return prefix + \" \" + dayString\n",
"def convertTime(self, time):\n \"\"\"Convert a datetime object representing a time into a human-ready\n string that can be read, spoken aloud, etc.\n\n Args:\n time (datetime.date): A datetime object to be converted into text.\n\n Returns:\n A string representation of the input time, ignoring any day-related\n information.\n \"\"\"\n # if ':00', ignore reporting minutes\n m_format = \"\"\n if time.minute:\n m_format = \":%M\"\n\n timeString = time.strftime(\"%I\" + m_format + \" %p\")\n\n # if '07:30', cast to '7:30'\n if not int(timeString[0]):\n timeString = timeString[1:]\n\n return timeString\n"
] | class DateService(object):
"""Initialize a DateService for extracting dates from text.
Args:
tz: An optional Pytz timezone. All datetime objects returned will
be relative to the supplied timezone, or timezone-less if none
is supplied.
now: The time to which all returned datetime objects should be
relative. For example, if the text is "In 5 hours", the
datetime returned will be now + datetime.timedelta(hours=5).
Uses datetime.datetime.now() if none is supplied.
Returns:
A DateService which uses tz and now for all of its computations.
"""
def __init__(self, tz=None, now=None):
self.tz = tz
if now:
self.now = now
else:
self.now = datetime.datetime.now(tz=self.tz)
__months__ = ['january', 'february', 'march', 'april', 'may', 'june',
'july', 'august', 'september', 'october', 'november',
'december']
__shortMonths__ = ['jan', 'feb', 'mar', 'apr', 'may',
'jun', 'jul', 'aug', 'sept', 'oct', 'nov', 'dec']
__daysOfWeek__ = ['monday', 'tuesday', 'wednesday',
'thursday', 'friday', 'saturday', 'sunday']
__relativeDates__ = ['tomorrow', 'tonight', 'next']
__todayMatches__ = ['tonight', 'today', 'this morning',
'this evening', 'this afternoon']
__tomorrowMatches__ = ['tomorrow', 'next morning',
'next evening', 'next afternoon']
__dateDescriptors__ = {
'one': 1,
'first': 1,
'two': 2,
'second': 2,
'three': 3,
'third': 3,
'four': 4,
'fourth': 4,
'five': 5,
'fifth': 5,
'six': 6,
'sixth': 6,
'seven': 7,
'seventh': 7,
'eight': 8,
'eighth': 8,
'nine': 9,
'ninth': 9,
'ten': 10,
'tenth': 10,
'eleven': 11,
'eleventh': 11,
'twelve': 12,
'twelth': 12,
'thirteen': 13,
'thirteenth': 13,
'fourteen': 14,
'fourteenth': 14,
'fifteen': 15,
'fifteenth': 15,
'sixteen': 16,
'sixteenth': 16,
'seventeen': 17,
'seventeenth': 17,
'eighteen': 18,
'eighteenth': 18,
'nineteen': 19,
'nineteenth': 19,
'twenty': 20,
'twentieth': 20,
'twenty one': 21,
'twenty first': 21,
'twenty two': 22,
'twenty second': 22,
'twenty three': 23,
'twenty third': 23,
'twenty four': 24,
'twenty fourth': 24,
'twenty five': 25,
'twenty fifth': 25,
'twenty six': 26,
'twenty sixth': 26,
'twenty seven': 27,
'twenty seventh': 27,
'twenty eight': 28,
'twenty eighth': 28,
'twenty nine': 29,
'twenty ninth': 29,
'thirty': 30,
'thirtieth': 30,
'thirty one': 31,
'thirty first': 31
}
_dayRegex = re.compile(
r"""(?ix)
((week|day)s?\ from\ )?
(
tomorrow
|tonight
|today
|(next|this)[\ \b](morning|afternoon|evening|Monday|Tuesday|Wednesday|Thursday|Friday|Saturday|Sunday)
|(Monday|Tuesday|Wednesday|Thursday|Friday|Saturday|Sunday)
|(Jan(?:uary)?|Feb(?:ruary)?|Mar(?:ch)?|Apr(?:il)?|May|June?|July?|Aug(?:ust)?|Sept(?:ember)?|Oct(?:ober)?|Nov(?:ember)?|Dec(?:ember)?)\ (\w+)((\s|\-)?\w*)
)
""")
_timeRegex = re.compile(
r"""(?ix)
.*?
(
morning
|afternoon
|evening
|(\d{1,2}\:\d{2})\ ?(am|pm)?
|in\ (.+?)\ (hours|minutes)(\ (?:and\ )?(.+?)\ (hours|minutes))?
)
.*?""")
def _preprocess(self, inp):
return inp.replace('-', ' ').lower()
def extractDays(self, inp):
"""Extracts all day-related information from an input string.
Ignores any information related to the specific time-of-day.
Args:
inp (str): Input string to be parsed.
Returns:
A list of datetime objects containing the extracted date from the
input snippet, or an empty list if none found.
"""
inp = self._preprocess(inp)
def extractDayOfWeek(dayMatch):
if dayMatch.group(5) in self.__daysOfWeek__:
return self.__daysOfWeek__.index(dayMatch.group(5))
elif dayMatch.group(6) in self.__daysOfWeek__:
return self.__daysOfWeek__.index(dayMatch.group(6))
def extractMonth(dayMatch):
if dayMatch.group(7) in self.__months__:
return self.__months__.index(dayMatch.group(7)) + 1
elif dayMatch.group(7) in self.__shortMonths__:
return self.__shortMonths__.index(dayMatch.group(7)) + 1
def extractDay(dayMatch):
combined = dayMatch.group(8) + dayMatch.group(9)
if combined in self.__dateDescriptors__:
return self.__dateDescriptors__[combined]
elif dayMatch.group(8) in self.__dateDescriptors__:
return self.__dateDescriptors__[dayMatch.group(8)]
elif int(dayMatch.group(8)) in self.__dateDescriptors__.values():
return int(dayMatch.group(8))
def extractDaysFrom(dayMatch):
if not dayMatch.group(1):
return 0
def numericalPrefix(dayMatch):
# Grab 'three' of 'three weeks from'
prefix = inp.split(dayMatch.group(1))[0].strip().split(' ')
prefix.reverse()
prefix = list(filter(lambda s: s != 'and', prefix))
# Generate best guess number
service = NumberService()
num = prefix[0]
if service.isValid(num):
for n in prefix[1:]:
inc = n + " " + num
if service.isValid(inc):
num = inc
else:
break
return service.parse(num)
return 1
factor = numericalPrefix(dayMatch)
if dayMatch.group(2) == 'week':
return factor * 7
elif dayMatch.group(2) == 'day':
return factor * 1
def handleMatch(dayMatch):
def safe(exp):
"""For safe evaluation of regex groups"""
try:
return exp()
except:
return False
days_from = safe(lambda: extractDaysFrom(dayMatch))
today = safe(lambda: dayMatch.group(3) in self.__todayMatches__)
tomorrow = safe(lambda: dayMatch.group(3)
in self.__tomorrowMatches__)
next_week = safe(lambda: dayMatch.group(4) == 'next')
day_of_week = safe(lambda: extractDayOfWeek(dayMatch))
month = safe(lambda: extractMonth(dayMatch))
day = safe(lambda: extractDay(dayMatch))
# Convert extracted terms to datetime object
if not dayMatch:
return None
elif today:
d = self.now
elif tomorrow:
d = self.now + datetime.timedelta(days=1)
elif type(day_of_week) == int:
current_day_of_week = self.now.weekday()
num_days_away = (day_of_week - current_day_of_week) % 7
if next_week:
num_days_away += 7
d = self.now + \
datetime.timedelta(days=num_days_away)
elif month and day:
d = datetime.datetime(
self.now.year, month, day,
self.now.hour, self.now.minute)
if days_from:
d += datetime.timedelta(days=days_from)
return d
matches = self._dayRegex.finditer(inp)
return [handleMatch(dayMatch) for dayMatch in matches]
def extractDay(self, inp):
"""Returns the first time-related date found in the input string,
or None if not found."""
day = self.extractDay(inp)
if day:
return day[0]
return None
def extractTimes(self, inp):
"""Extracts time-related information from an input string.
Ignores any information related to the specific date, focusing
on the time-of-day.
Args:
inp (str): Input string to be parsed.
Returns:
A list of datetime objects containing the extracted times from the
input snippet, or an empty list if none found.
"""
def handleMatch(time):
relative = False
if not time:
return None
# Default times: 8am, 12pm, 7pm
elif time.group(1) == 'morning':
h = 8
m = 0
elif time.group(1) == 'afternoon':
h = 12
m = 0
elif time.group(1) == 'evening':
h = 19
m = 0
elif time.group(4) and time.group(5):
h, m = 0, 0
# Extract hours difference
converter = NumberService()
try:
diff = converter.parse(time.group(4))
except:
return None
if time.group(5) == 'hours':
h += diff
else:
m += diff
# Extract minutes difference
if time.group(6):
converter = NumberService()
try:
diff = converter.parse(time.group(7))
except:
return None
if time.group(8) == 'hours':
h += diff
else:
m += diff
relative = True
else:
# Convert from "HH:MM pm" format
t = time.group(2)
h, m = int(t.split(':')[0]) % 12, int(t.split(':')[1])
try:
if time.group(3) == 'pm':
h += 12
except IndexError:
pass
if relative:
return self.now + datetime.timedelta(hours=h, minutes=m)
else:
return datetime.datetime(
self.now.year, self.now.month, self.now.day, h, m
)
inp = self._preprocess(inp)
return [handleMatch(time) for time in self._timeRegex.finditer(inp)]
def extractTime(self, inp):
"""Returns the first time-related date found in the input string,
or None if not found."""
times = self.extractTimes(inp)
if times:
return times[0]
return None
def extractDates(self, inp):
"""Extract semantic date information from an input string.
In effect, runs both parseDay and parseTime on the input
string and merges the results to produce a comprehensive
datetime object.
Args:
inp (str): Input string to be parsed.
Returns:
A list of datetime objects containing the extracted dates from the
input snippet, or an empty list if not found.
"""
def merge(param):
day, time = param
if not (day or time):
return None
if not day:
return time
if not time:
return day
return datetime.datetime(
day.year, day.month, day.day, time.hour, time.minute
)
days = self.extractDays(inp)
times = self.extractTimes(inp)
return map(merge, zip_longest(days, times, fillvalue=None))
def extractDate(self, inp):
"""Returns the first date found in the input string, or None if not
found."""
dates = self.extractDates(inp)
for date in dates:
return date
return None
def convertDay(self, day, prefix="", weekday=False):
"""Convert a datetime object representing a day into a human-ready
string that can be read, spoken aloud, etc.
Args:
day (datetime.date): A datetime object to be converted into text.
prefix (str): An optional argument that prefixes the converted
string. For example, if prefix="in", you'd receive "in two
days", rather than "two days", while the method would still
return "tomorrow" (rather than "in tomorrow").
weekday (bool): An optional argument that returns "Monday, Oct. 1"
if True, rather than "Oct. 1".
Returns:
A string representation of the input day, ignoring any time-related
information.
"""
def sameDay(d1, d2):
d = d1.day == d2.day
m = d1.month == d2.month
y = d1.year == d2.year
return d and m and y
tom = self.now + datetime.timedelta(days=1)
if sameDay(day, self.now):
return "today"
elif sameDay(day, tom):
return "tomorrow"
if weekday:
dayString = day.strftime("%A, %B %d")
else:
dayString = day.strftime("%B %d")
# Ex) Remove '0' from 'August 03'
if not int(dayString[-2]):
dayString = dayString[:-2] + dayString[-1]
return prefix + " " + dayString
def convertTime(self, time):
"""Convert a datetime object representing a time into a human-ready
string that can be read, spoken aloud, etc.
Args:
time (datetime.date): A datetime object to be converted into text.
Returns:
A string representation of the input time, ignoring any day-related
information.
"""
# if ':00', ignore reporting minutes
m_format = ""
if time.minute:
m_format = ":%M"
timeString = time.strftime("%I" + m_format + " %p")
# if '07:30', cast to '7:30'
if not int(timeString[0]):
timeString = timeString[1:]
return timeString
|
Parsely/schemato | schemato/validator.py | SchemaValidator.validate | python | def validate(self):
log.info("{}\nValidating against {}"
.format("-" * 100, self.schema_def.__class__.__name__))
if not self.schema_def:
raise ValueError("No schema definition supplied.")
self.checked_attributes = []
# TODO - this should maybe choose the actually used namespace, not just
# the first one in the list
result = ValidationResult(self.allowed_namespaces[0],
self.schema_def.__class__.__name__)
for subject, predicate, object_ in self.graph:
log.info("\nsubj: {subj}\npred: {pred}\n obj: {obj}"
.format(subj=subject, pred=predicate,
obj=object_.encode('utf-8')))
result.add_error(self._check_triple((subject, predicate, object_)))
return result | Iterate over all triples in the graph and validate each one
appropriately | train | https://github.com/Parsely/schemato/blob/7002316fbcd52f2e669f8372bf1338c572e3df4b/schemato/validator.py#L34-L55 | [
"def add_error(self, warning):\n if warning:\n if warning.level == ValidationResult.WARNING:\n self.warnings.append(warning)\n elif warning.level == ValidationResult.ERROR:\n self.errors.append(warning)\n",
"def _check_triple(self, triple):\n \"\"\"compare triple to ontology, return error or None\"\"\"\n subj, pred, obj = triple\n\n if self._should_ignore_predicate(pred):\n log.info(\"Ignoring triple with predicate '{}'\"\n .format(self._field_name_from_uri(pred)))\n return\n\n classes = []\n log.warning(\"Possible member %s found\" % pred)\n\n pred = self._expand_qname(pred)\n\n if self._namespace_from_uri(pred) not in self.allowed_namespaces:\n log.info(\"Member %s does not use an allowed namespace\", pred)\n return\n\n instanceof = self._is_instance((subj, pred, obj))\n if type(instanceof) == rt.URIRef:\n instanceof = self._expand_qname(instanceof)\n\n if hasattr(self.schema_def, \"attributes_by_class\") and \\\n not self.schema_def.attributes_by_class:\n log.info(\"Parsed ontology not found. Parsing...\")\n self.schema_def.parse_ontology()\n\n class_invalid = self._validate_class(instanceof)\n if class_invalid:\n log.warning(\"Invalid class %s\" % instanceof)\n return class_invalid\n # TODO - the above sometimes fails when a single object has more than\n # one rdfa type (eg <span property=\"schema:creator rnews:creator\"\n # typeof=\"schema:Person rnews:Person\">\n # Graph chooses the type in an arbitrary order, so it's unreliable\n # eg: http://semanticweb.com/the-impact-of-rdfa_b35003\n\n classes = self._superclasses_for_subject(self.graph, instanceof)\n classes.append(instanceof)\n\n member_invalid = self._validate_member(pred, classes, instanceof)\n if member_invalid:\n log.warning(\"Invalid member of class\")\n return member_invalid\n\n dupe_invalid = self._validate_duplication((subj, pred), instanceof)\n if dupe_invalid:\n log.warning(\"Duplication found\")\n return dupe_invalid\n\n # collect a list of checked attributes\n self.checked_attributes.append((subj, pred))\n log.warning(\"successfully validated triple, no errors\")\n return\n"
] | class SchemaValidator(object):
"""ASSUMPTIONS:
This class knows about the file being validated, but it recieves that
file as a graph and a doc_lines
It does not perform any parsing logic on the file
It recieves a "validatable" graph object and returns errors"""
def __init__(self, graph, doc_lines, url=""):
super(SchemaValidator, self).__init__()
self.schema_def = None
self.allowed_namespaces = []
self.graph = graph
self.checked_attributes = []
self.stripped_attribute_names = {}
log.info("init validator: %s" % self.__class__.__name__)
self.doc_lines = doc_lines
self.err = partial(_error, doc_lines=self.doc_lines)
def _should_ignore_predicate(self, predicate):
ignored_predicates = ['type', 'item', 'first', 'rest']
return self._field_name_from_uri(predicate) in ignored_predicates
def _check_triple(self, triple):
"""compare triple to ontology, return error or None"""
subj, pred, obj = triple
if self._should_ignore_predicate(pred):
log.info("Ignoring triple with predicate '{}'"
.format(self._field_name_from_uri(pred)))
return
classes = []
log.warning("Possible member %s found" % pred)
pred = self._expand_qname(pred)
if self._namespace_from_uri(pred) not in self.allowed_namespaces:
log.info("Member %s does not use an allowed namespace", pred)
return
instanceof = self._is_instance((subj, pred, obj))
if type(instanceof) == rt.URIRef:
instanceof = self._expand_qname(instanceof)
if hasattr(self.schema_def, "attributes_by_class") and \
not self.schema_def.attributes_by_class:
log.info("Parsed ontology not found. Parsing...")
self.schema_def.parse_ontology()
class_invalid = self._validate_class(instanceof)
if class_invalid:
log.warning("Invalid class %s" % instanceof)
return class_invalid
# TODO - the above sometimes fails when a single object has more than
# one rdfa type (eg <span property="schema:creator rnews:creator"
# typeof="schema:Person rnews:Person">
# Graph chooses the type in an arbitrary order, so it's unreliable
# eg: http://semanticweb.com/the-impact-of-rdfa_b35003
classes = self._superclasses_for_subject(self.graph, instanceof)
classes.append(instanceof)
member_invalid = self._validate_member(pred, classes, instanceof)
if member_invalid:
log.warning("Invalid member of class")
return member_invalid
dupe_invalid = self._validate_duplication((subj, pred), instanceof)
if dupe_invalid:
log.warning("Duplication found")
return dupe_invalid
# collect a list of checked attributes
self.checked_attributes.append((subj, pred))
log.warning("successfully validated triple, no errors")
return
def _validate_class(self, cl):
"""return error if class `cl` is not found in the ontology"""
if cl not in self.schema_def.attributes_by_class:
search_string = self._build_search_string(cl)
err = self.err(
"{0} - invalid class", self._field_name_from_uri(cl),
search_string=search_string)
return ValidationWarning(ValidationResult.ERROR, err['err'],
err['line'], err['num'])
def _get_stripped_attributes(self, member, classes):
stripped = []
if member not in self.stripped_attribute_names:
for cl in classes:
stripped.extend(
[self._field_name_from_uri(attr)
for attr in self.schema_def.attributes_by_class[cl]])
self.stripped_attribute_names[member] = stripped
else:
stripped = self.stripped_attribute_names[member]
return stripped
def _validate_member(self, member, classes, instanceof):
"""return error if `member` is not a member of any class in
`classes`
"""
log.info("Validating member %s" % member)
stripped = self._get_stripped_attributes(member, classes)
if self._field_name_from_uri(member) in stripped:
all_class_members = sum([self.schema_def.attributes_by_class[cl]
for cl in classes], [])
if member in all_class_members:
return
if self._namespace_from_uri(member) in self.allowed_namespaces:
err = self.err("Unoficially allowed namespace {0}",
self._namespace_from_uri(member))
return ValidationWarning(ValidationResult.WARNING, err['err'],
err['line'], err['num'])
else:
err = self.err("{0} - invalid member of {1}",
self._field_name_from_uri(member),
self._field_name_from_uri(instanceof))
return ValidationWarning(ValidationResult.ERROR, err['err'],
err['line'], err['num'])
def _validate_duplication(self, subj_and_pred, cl):
"""returns error if we've already seen the member `pred` on `subj`"""
subj, pred = subj_and_pred
log.info("Validating duplication of member %s" % pred)
if (subj, pred) in self.checked_attributes:
err = self.err("{0} - duplicated member of {1}",
self._field_name_from_uri(pred),
self._field_name_from_uri(cl))
return ValidationWarning(ValidationResult.WARNING, err['err'],
err['line'], err['num'])
def _superclasses_for_subject(self, graph, typeof):
"""helper, returns a list of all superclasses of a given class"""
# TODO - this might be replacing a fairly simple graph API query where
# it doesn't need to
classes = []
superclass = typeof
while True:
found = False
for p, o in self.schema_def.ontology[superclass]:
if self.schema_def.lexicon['subclass'] == str(p):
found = True
classes.append(o)
superclass = o
if not found:
break
return classes
def _is_instance(self, triple):
"""helper, returns the class type of subj"""
subj, pred, obj = triple
input_pred_ns = self._namespace_from_uri(self._expand_qname(pred))
triples = self.graph.triples(
(subj, rt.URIRef(self.schema_def.lexicon['type']), None)
)
if triples:
for tr in triples:
triple_obj_ns = self._namespace_from_uri(
self._expand_qname(tr[2]))
if input_pred_ns == triple_obj_ns: # match namespaces
return tr[2] # return the object
def _field_name_from_uri(self, uri):
"""helper, returns the name of an attribute (without namespace prefix)
"""
# TODO - should use graph API
uri = str(uri)
parts = uri.split('#')
if len(parts) == 1:
return uri.split('/')[-1] or uri
return parts[-1]
def _namespace_from_uri(self, uri):
"""returns the expanded namespace prefix of a uri"""
# TODO - this could be helped a bunch with proper use of the graph API
# it seems a bit fragile to treat these as simple string-splits
uri = str(uri)
parts = uri.split('#')
if len(parts) == 1:
return "%s/" % '/'.join(uri.split('/')[:-1])
return "%s#" % '#'.join(parts[:-1])
def _expand_qname(self, qname):
"""expand a qualified name's namespace prefix to include the resolved
namespace root url"""
if type(qname) is not rt.URIRef:
raise TypeError("Cannot expand qname of type {}, must be URIRef"
.format(type(qname)))
for ns in self.graph.namespaces():
if ns[0] == qname.split(':')[0]:
return rt.URIRef("%s%s" % (ns[1], qname.split(':')[-1]))
return qname
def _build_search_string(self, uri):
return self._field_name_from_uri(uri)
|
Parsely/schemato | schemato/validator.py | SchemaValidator._check_triple | python | def _check_triple(self, triple):
subj, pred, obj = triple
if self._should_ignore_predicate(pred):
log.info("Ignoring triple with predicate '{}'"
.format(self._field_name_from_uri(pred)))
return
classes = []
log.warning("Possible member %s found" % pred)
pred = self._expand_qname(pred)
if self._namespace_from_uri(pred) not in self.allowed_namespaces:
log.info("Member %s does not use an allowed namespace", pred)
return
instanceof = self._is_instance((subj, pred, obj))
if type(instanceof) == rt.URIRef:
instanceof = self._expand_qname(instanceof)
if hasattr(self.schema_def, "attributes_by_class") and \
not self.schema_def.attributes_by_class:
log.info("Parsed ontology not found. Parsing...")
self.schema_def.parse_ontology()
class_invalid = self._validate_class(instanceof)
if class_invalid:
log.warning("Invalid class %s" % instanceof)
return class_invalid
# TODO - the above sometimes fails when a single object has more than
# one rdfa type (eg <span property="schema:creator rnews:creator"
# typeof="schema:Person rnews:Person">
# Graph chooses the type in an arbitrary order, so it's unreliable
# eg: http://semanticweb.com/the-impact-of-rdfa_b35003
classes = self._superclasses_for_subject(self.graph, instanceof)
classes.append(instanceof)
member_invalid = self._validate_member(pred, classes, instanceof)
if member_invalid:
log.warning("Invalid member of class")
return member_invalid
dupe_invalid = self._validate_duplication((subj, pred), instanceof)
if dupe_invalid:
log.warning("Duplication found")
return dupe_invalid
# collect a list of checked attributes
self.checked_attributes.append((subj, pred))
log.warning("successfully validated triple, no errors")
return | compare triple to ontology, return error or None | train | https://github.com/Parsely/schemato/blob/7002316fbcd52f2e669f8372bf1338c572e3df4b/schemato/validator.py#L61-L114 | [
"def _should_ignore_predicate(self, predicate):\n ignored_predicates = ['type', 'item', 'first', 'rest']\n return self._field_name_from_uri(predicate) in ignored_predicates\n"
] | class SchemaValidator(object):
"""ASSUMPTIONS:
This class knows about the file being validated, but it recieves that
file as a graph and a doc_lines
It does not perform any parsing logic on the file
It recieves a "validatable" graph object and returns errors"""
def __init__(self, graph, doc_lines, url=""):
super(SchemaValidator, self).__init__()
self.schema_def = None
self.allowed_namespaces = []
self.graph = graph
self.checked_attributes = []
self.stripped_attribute_names = {}
log.info("init validator: %s" % self.__class__.__name__)
self.doc_lines = doc_lines
self.err = partial(_error, doc_lines=self.doc_lines)
def validate(self):
"""Iterate over all triples in the graph and validate each one
appropriately
"""
log.info("{}\nValidating against {}"
.format("-" * 100, self.schema_def.__class__.__name__))
if not self.schema_def:
raise ValueError("No schema definition supplied.")
self.checked_attributes = []
# TODO - this should maybe choose the actually used namespace, not just
# the first one in the list
result = ValidationResult(self.allowed_namespaces[0],
self.schema_def.__class__.__name__)
for subject, predicate, object_ in self.graph:
log.info("\nsubj: {subj}\npred: {pred}\n obj: {obj}"
.format(subj=subject, pred=predicate,
obj=object_.encode('utf-8')))
result.add_error(self._check_triple((subject, predicate, object_)))
return result
def _should_ignore_predicate(self, predicate):
ignored_predicates = ['type', 'item', 'first', 'rest']
return self._field_name_from_uri(predicate) in ignored_predicates
def _validate_class(self, cl):
"""return error if class `cl` is not found in the ontology"""
if cl not in self.schema_def.attributes_by_class:
search_string = self._build_search_string(cl)
err = self.err(
"{0} - invalid class", self._field_name_from_uri(cl),
search_string=search_string)
return ValidationWarning(ValidationResult.ERROR, err['err'],
err['line'], err['num'])
def _get_stripped_attributes(self, member, classes):
stripped = []
if member not in self.stripped_attribute_names:
for cl in classes:
stripped.extend(
[self._field_name_from_uri(attr)
for attr in self.schema_def.attributes_by_class[cl]])
self.stripped_attribute_names[member] = stripped
else:
stripped = self.stripped_attribute_names[member]
return stripped
def _validate_member(self, member, classes, instanceof):
"""return error if `member` is not a member of any class in
`classes`
"""
log.info("Validating member %s" % member)
stripped = self._get_stripped_attributes(member, classes)
if self._field_name_from_uri(member) in stripped:
all_class_members = sum([self.schema_def.attributes_by_class[cl]
for cl in classes], [])
if member in all_class_members:
return
if self._namespace_from_uri(member) in self.allowed_namespaces:
err = self.err("Unoficially allowed namespace {0}",
self._namespace_from_uri(member))
return ValidationWarning(ValidationResult.WARNING, err['err'],
err['line'], err['num'])
else:
err = self.err("{0} - invalid member of {1}",
self._field_name_from_uri(member),
self._field_name_from_uri(instanceof))
return ValidationWarning(ValidationResult.ERROR, err['err'],
err['line'], err['num'])
def _validate_duplication(self, subj_and_pred, cl):
"""returns error if we've already seen the member `pred` on `subj`"""
subj, pred = subj_and_pred
log.info("Validating duplication of member %s" % pred)
if (subj, pred) in self.checked_attributes:
err = self.err("{0} - duplicated member of {1}",
self._field_name_from_uri(pred),
self._field_name_from_uri(cl))
return ValidationWarning(ValidationResult.WARNING, err['err'],
err['line'], err['num'])
def _superclasses_for_subject(self, graph, typeof):
"""helper, returns a list of all superclasses of a given class"""
# TODO - this might be replacing a fairly simple graph API query where
# it doesn't need to
classes = []
superclass = typeof
while True:
found = False
for p, o in self.schema_def.ontology[superclass]:
if self.schema_def.lexicon['subclass'] == str(p):
found = True
classes.append(o)
superclass = o
if not found:
break
return classes
def _is_instance(self, triple):
"""helper, returns the class type of subj"""
subj, pred, obj = triple
input_pred_ns = self._namespace_from_uri(self._expand_qname(pred))
triples = self.graph.triples(
(subj, rt.URIRef(self.schema_def.lexicon['type']), None)
)
if triples:
for tr in triples:
triple_obj_ns = self._namespace_from_uri(
self._expand_qname(tr[2]))
if input_pred_ns == triple_obj_ns: # match namespaces
return tr[2] # return the object
def _field_name_from_uri(self, uri):
"""helper, returns the name of an attribute (without namespace prefix)
"""
# TODO - should use graph API
uri = str(uri)
parts = uri.split('#')
if len(parts) == 1:
return uri.split('/')[-1] or uri
return parts[-1]
def _namespace_from_uri(self, uri):
"""returns the expanded namespace prefix of a uri"""
# TODO - this could be helped a bunch with proper use of the graph API
# it seems a bit fragile to treat these as simple string-splits
uri = str(uri)
parts = uri.split('#')
if len(parts) == 1:
return "%s/" % '/'.join(uri.split('/')[:-1])
return "%s#" % '#'.join(parts[:-1])
def _expand_qname(self, qname):
"""expand a qualified name's namespace prefix to include the resolved
namespace root url"""
if type(qname) is not rt.URIRef:
raise TypeError("Cannot expand qname of type {}, must be URIRef"
.format(type(qname)))
for ns in self.graph.namespaces():
if ns[0] == qname.split(':')[0]:
return rt.URIRef("%s%s" % (ns[1], qname.split(':')[-1]))
return qname
def _build_search_string(self, uri):
return self._field_name_from_uri(uri)
|
Parsely/schemato | schemato/validator.py | SchemaValidator._validate_class | python | def _validate_class(self, cl):
if cl not in self.schema_def.attributes_by_class:
search_string = self._build_search_string(cl)
err = self.err(
"{0} - invalid class", self._field_name_from_uri(cl),
search_string=search_string)
return ValidationWarning(ValidationResult.ERROR, err['err'],
err['line'], err['num']) | return error if class `cl` is not found in the ontology | train | https://github.com/Parsely/schemato/blob/7002316fbcd52f2e669f8372bf1338c572e3df4b/schemato/validator.py#L116-L124 | null | class SchemaValidator(object):
"""ASSUMPTIONS:
This class knows about the file being validated, but it recieves that
file as a graph and a doc_lines
It does not perform any parsing logic on the file
It recieves a "validatable" graph object and returns errors"""
def __init__(self, graph, doc_lines, url=""):
super(SchemaValidator, self).__init__()
self.schema_def = None
self.allowed_namespaces = []
self.graph = graph
self.checked_attributes = []
self.stripped_attribute_names = {}
log.info("init validator: %s" % self.__class__.__name__)
self.doc_lines = doc_lines
self.err = partial(_error, doc_lines=self.doc_lines)
def validate(self):
"""Iterate over all triples in the graph and validate each one
appropriately
"""
log.info("{}\nValidating against {}"
.format("-" * 100, self.schema_def.__class__.__name__))
if not self.schema_def:
raise ValueError("No schema definition supplied.")
self.checked_attributes = []
# TODO - this should maybe choose the actually used namespace, not just
# the first one in the list
result = ValidationResult(self.allowed_namespaces[0],
self.schema_def.__class__.__name__)
for subject, predicate, object_ in self.graph:
log.info("\nsubj: {subj}\npred: {pred}\n obj: {obj}"
.format(subj=subject, pred=predicate,
obj=object_.encode('utf-8')))
result.add_error(self._check_triple((subject, predicate, object_)))
return result
def _should_ignore_predicate(self, predicate):
ignored_predicates = ['type', 'item', 'first', 'rest']
return self._field_name_from_uri(predicate) in ignored_predicates
def _check_triple(self, triple):
"""compare triple to ontology, return error or None"""
subj, pred, obj = triple
if self._should_ignore_predicate(pred):
log.info("Ignoring triple with predicate '{}'"
.format(self._field_name_from_uri(pred)))
return
classes = []
log.warning("Possible member %s found" % pred)
pred = self._expand_qname(pred)
if self._namespace_from_uri(pred) not in self.allowed_namespaces:
log.info("Member %s does not use an allowed namespace", pred)
return
instanceof = self._is_instance((subj, pred, obj))
if type(instanceof) == rt.URIRef:
instanceof = self._expand_qname(instanceof)
if hasattr(self.schema_def, "attributes_by_class") and \
not self.schema_def.attributes_by_class:
log.info("Parsed ontology not found. Parsing...")
self.schema_def.parse_ontology()
class_invalid = self._validate_class(instanceof)
if class_invalid:
log.warning("Invalid class %s" % instanceof)
return class_invalid
# TODO - the above sometimes fails when a single object has more than
# one rdfa type (eg <span property="schema:creator rnews:creator"
# typeof="schema:Person rnews:Person">
# Graph chooses the type in an arbitrary order, so it's unreliable
# eg: http://semanticweb.com/the-impact-of-rdfa_b35003
classes = self._superclasses_for_subject(self.graph, instanceof)
classes.append(instanceof)
member_invalid = self._validate_member(pred, classes, instanceof)
if member_invalid:
log.warning("Invalid member of class")
return member_invalid
dupe_invalid = self._validate_duplication((subj, pred), instanceof)
if dupe_invalid:
log.warning("Duplication found")
return dupe_invalid
# collect a list of checked attributes
self.checked_attributes.append((subj, pred))
log.warning("successfully validated triple, no errors")
return
def _get_stripped_attributes(self, member, classes):
stripped = []
if member not in self.stripped_attribute_names:
for cl in classes:
stripped.extend(
[self._field_name_from_uri(attr)
for attr in self.schema_def.attributes_by_class[cl]])
self.stripped_attribute_names[member] = stripped
else:
stripped = self.stripped_attribute_names[member]
return stripped
def _validate_member(self, member, classes, instanceof):
"""return error if `member` is not a member of any class in
`classes`
"""
log.info("Validating member %s" % member)
stripped = self._get_stripped_attributes(member, classes)
if self._field_name_from_uri(member) in stripped:
all_class_members = sum([self.schema_def.attributes_by_class[cl]
for cl in classes], [])
if member in all_class_members:
return
if self._namespace_from_uri(member) in self.allowed_namespaces:
err = self.err("Unoficially allowed namespace {0}",
self._namespace_from_uri(member))
return ValidationWarning(ValidationResult.WARNING, err['err'],
err['line'], err['num'])
else:
err = self.err("{0} - invalid member of {1}",
self._field_name_from_uri(member),
self._field_name_from_uri(instanceof))
return ValidationWarning(ValidationResult.ERROR, err['err'],
err['line'], err['num'])
def _validate_duplication(self, subj_and_pred, cl):
"""returns error if we've already seen the member `pred` on `subj`"""
subj, pred = subj_and_pred
log.info("Validating duplication of member %s" % pred)
if (subj, pred) in self.checked_attributes:
err = self.err("{0} - duplicated member of {1}",
self._field_name_from_uri(pred),
self._field_name_from_uri(cl))
return ValidationWarning(ValidationResult.WARNING, err['err'],
err['line'], err['num'])
def _superclasses_for_subject(self, graph, typeof):
"""helper, returns a list of all superclasses of a given class"""
# TODO - this might be replacing a fairly simple graph API query where
# it doesn't need to
classes = []
superclass = typeof
while True:
found = False
for p, o in self.schema_def.ontology[superclass]:
if self.schema_def.lexicon['subclass'] == str(p):
found = True
classes.append(o)
superclass = o
if not found:
break
return classes
def _is_instance(self, triple):
"""helper, returns the class type of subj"""
subj, pred, obj = triple
input_pred_ns = self._namespace_from_uri(self._expand_qname(pred))
triples = self.graph.triples(
(subj, rt.URIRef(self.schema_def.lexicon['type']), None)
)
if triples:
for tr in triples:
triple_obj_ns = self._namespace_from_uri(
self._expand_qname(tr[2]))
if input_pred_ns == triple_obj_ns: # match namespaces
return tr[2] # return the object
def _field_name_from_uri(self, uri):
"""helper, returns the name of an attribute (without namespace prefix)
"""
# TODO - should use graph API
uri = str(uri)
parts = uri.split('#')
if len(parts) == 1:
return uri.split('/')[-1] or uri
return parts[-1]
def _namespace_from_uri(self, uri):
"""returns the expanded namespace prefix of a uri"""
# TODO - this could be helped a bunch with proper use of the graph API
# it seems a bit fragile to treat these as simple string-splits
uri = str(uri)
parts = uri.split('#')
if len(parts) == 1:
return "%s/" % '/'.join(uri.split('/')[:-1])
return "%s#" % '#'.join(parts[:-1])
def _expand_qname(self, qname):
"""expand a qualified name's namespace prefix to include the resolved
namespace root url"""
if type(qname) is not rt.URIRef:
raise TypeError("Cannot expand qname of type {}, must be URIRef"
.format(type(qname)))
for ns in self.graph.namespaces():
if ns[0] == qname.split(':')[0]:
return rt.URIRef("%s%s" % (ns[1], qname.split(':')[-1]))
return qname
def _build_search_string(self, uri):
return self._field_name_from_uri(uri)
|
Parsely/schemato | schemato/validator.py | SchemaValidator._validate_member | python | def _validate_member(self, member, classes, instanceof):
log.info("Validating member %s" % member)
stripped = self._get_stripped_attributes(member, classes)
if self._field_name_from_uri(member) in stripped:
all_class_members = sum([self.schema_def.attributes_by_class[cl]
for cl in classes], [])
if member in all_class_members:
return
if self._namespace_from_uri(member) in self.allowed_namespaces:
err = self.err("Unoficially allowed namespace {0}",
self._namespace_from_uri(member))
return ValidationWarning(ValidationResult.WARNING, err['err'],
err['line'], err['num'])
else:
err = self.err("{0} - invalid member of {1}",
self._field_name_from_uri(member),
self._field_name_from_uri(instanceof))
return ValidationWarning(ValidationResult.ERROR, err['err'],
err['line'], err['num']) | return error if `member` is not a member of any class in
`classes` | train | https://github.com/Parsely/schemato/blob/7002316fbcd52f2e669f8372bf1338c572e3df4b/schemato/validator.py#L138-L160 | null | class SchemaValidator(object):
"""ASSUMPTIONS:
This class knows about the file being validated, but it recieves that
file as a graph and a doc_lines
It does not perform any parsing logic on the file
It recieves a "validatable" graph object and returns errors"""
def __init__(self, graph, doc_lines, url=""):
super(SchemaValidator, self).__init__()
self.schema_def = None
self.allowed_namespaces = []
self.graph = graph
self.checked_attributes = []
self.stripped_attribute_names = {}
log.info("init validator: %s" % self.__class__.__name__)
self.doc_lines = doc_lines
self.err = partial(_error, doc_lines=self.doc_lines)
def validate(self):
"""Iterate over all triples in the graph and validate each one
appropriately
"""
log.info("{}\nValidating against {}"
.format("-" * 100, self.schema_def.__class__.__name__))
if not self.schema_def:
raise ValueError("No schema definition supplied.")
self.checked_attributes = []
# TODO - this should maybe choose the actually used namespace, not just
# the first one in the list
result = ValidationResult(self.allowed_namespaces[0],
self.schema_def.__class__.__name__)
for subject, predicate, object_ in self.graph:
log.info("\nsubj: {subj}\npred: {pred}\n obj: {obj}"
.format(subj=subject, pred=predicate,
obj=object_.encode('utf-8')))
result.add_error(self._check_triple((subject, predicate, object_)))
return result
def _should_ignore_predicate(self, predicate):
ignored_predicates = ['type', 'item', 'first', 'rest']
return self._field_name_from_uri(predicate) in ignored_predicates
def _check_triple(self, triple):
"""compare triple to ontology, return error or None"""
subj, pred, obj = triple
if self._should_ignore_predicate(pred):
log.info("Ignoring triple with predicate '{}'"
.format(self._field_name_from_uri(pred)))
return
classes = []
log.warning("Possible member %s found" % pred)
pred = self._expand_qname(pred)
if self._namespace_from_uri(pred) not in self.allowed_namespaces:
log.info("Member %s does not use an allowed namespace", pred)
return
instanceof = self._is_instance((subj, pred, obj))
if type(instanceof) == rt.URIRef:
instanceof = self._expand_qname(instanceof)
if hasattr(self.schema_def, "attributes_by_class") and \
not self.schema_def.attributes_by_class:
log.info("Parsed ontology not found. Parsing...")
self.schema_def.parse_ontology()
class_invalid = self._validate_class(instanceof)
if class_invalid:
log.warning("Invalid class %s" % instanceof)
return class_invalid
# TODO - the above sometimes fails when a single object has more than
# one rdfa type (eg <span property="schema:creator rnews:creator"
# typeof="schema:Person rnews:Person">
# Graph chooses the type in an arbitrary order, so it's unreliable
# eg: http://semanticweb.com/the-impact-of-rdfa_b35003
classes = self._superclasses_for_subject(self.graph, instanceof)
classes.append(instanceof)
member_invalid = self._validate_member(pred, classes, instanceof)
if member_invalid:
log.warning("Invalid member of class")
return member_invalid
dupe_invalid = self._validate_duplication((subj, pred), instanceof)
if dupe_invalid:
log.warning("Duplication found")
return dupe_invalid
# collect a list of checked attributes
self.checked_attributes.append((subj, pred))
log.warning("successfully validated triple, no errors")
return
def _validate_class(self, cl):
"""return error if class `cl` is not found in the ontology"""
if cl not in self.schema_def.attributes_by_class:
search_string = self._build_search_string(cl)
err = self.err(
"{0} - invalid class", self._field_name_from_uri(cl),
search_string=search_string)
return ValidationWarning(ValidationResult.ERROR, err['err'],
err['line'], err['num'])
def _get_stripped_attributes(self, member, classes):
stripped = []
if member not in self.stripped_attribute_names:
for cl in classes:
stripped.extend(
[self._field_name_from_uri(attr)
for attr in self.schema_def.attributes_by_class[cl]])
self.stripped_attribute_names[member] = stripped
else:
stripped = self.stripped_attribute_names[member]
return stripped
def _validate_duplication(self, subj_and_pred, cl):
"""returns error if we've already seen the member `pred` on `subj`"""
subj, pred = subj_and_pred
log.info("Validating duplication of member %s" % pred)
if (subj, pred) in self.checked_attributes:
err = self.err("{0} - duplicated member of {1}",
self._field_name_from_uri(pred),
self._field_name_from_uri(cl))
return ValidationWarning(ValidationResult.WARNING, err['err'],
err['line'], err['num'])
def _superclasses_for_subject(self, graph, typeof):
"""helper, returns a list of all superclasses of a given class"""
# TODO - this might be replacing a fairly simple graph API query where
# it doesn't need to
classes = []
superclass = typeof
while True:
found = False
for p, o in self.schema_def.ontology[superclass]:
if self.schema_def.lexicon['subclass'] == str(p):
found = True
classes.append(o)
superclass = o
if not found:
break
return classes
def _is_instance(self, triple):
"""helper, returns the class type of subj"""
subj, pred, obj = triple
input_pred_ns = self._namespace_from_uri(self._expand_qname(pred))
triples = self.graph.triples(
(subj, rt.URIRef(self.schema_def.lexicon['type']), None)
)
if triples:
for tr in triples:
triple_obj_ns = self._namespace_from_uri(
self._expand_qname(tr[2]))
if input_pred_ns == triple_obj_ns: # match namespaces
return tr[2] # return the object
def _field_name_from_uri(self, uri):
"""helper, returns the name of an attribute (without namespace prefix)
"""
# TODO - should use graph API
uri = str(uri)
parts = uri.split('#')
if len(parts) == 1:
return uri.split('/')[-1] or uri
return parts[-1]
def _namespace_from_uri(self, uri):
"""returns the expanded namespace prefix of a uri"""
# TODO - this could be helped a bunch with proper use of the graph API
# it seems a bit fragile to treat these as simple string-splits
uri = str(uri)
parts = uri.split('#')
if len(parts) == 1:
return "%s/" % '/'.join(uri.split('/')[:-1])
return "%s#" % '#'.join(parts[:-1])
def _expand_qname(self, qname):
"""expand a qualified name's namespace prefix to include the resolved
namespace root url"""
if type(qname) is not rt.URIRef:
raise TypeError("Cannot expand qname of type {}, must be URIRef"
.format(type(qname)))
for ns in self.graph.namespaces():
if ns[0] == qname.split(':')[0]:
return rt.URIRef("%s%s" % (ns[1], qname.split(':')[-1]))
return qname
def _build_search_string(self, uri):
return self._field_name_from_uri(uri)
|
Parsely/schemato | schemato/validator.py | SchemaValidator._validate_duplication | python | def _validate_duplication(self, subj_and_pred, cl):
subj, pred = subj_and_pred
log.info("Validating duplication of member %s" % pred)
if (subj, pred) in self.checked_attributes:
err = self.err("{0} - duplicated member of {1}",
self._field_name_from_uri(pred),
self._field_name_from_uri(cl))
return ValidationWarning(ValidationResult.WARNING, err['err'],
err['line'], err['num']) | returns error if we've already seen the member `pred` on `subj` | train | https://github.com/Parsely/schemato/blob/7002316fbcd52f2e669f8372bf1338c572e3df4b/schemato/validator.py#L162-L172 | null | class SchemaValidator(object):
"""ASSUMPTIONS:
This class knows about the file being validated, but it recieves that
file as a graph and a doc_lines
It does not perform any parsing logic on the file
It recieves a "validatable" graph object and returns errors"""
def __init__(self, graph, doc_lines, url=""):
super(SchemaValidator, self).__init__()
self.schema_def = None
self.allowed_namespaces = []
self.graph = graph
self.checked_attributes = []
self.stripped_attribute_names = {}
log.info("init validator: %s" % self.__class__.__name__)
self.doc_lines = doc_lines
self.err = partial(_error, doc_lines=self.doc_lines)
def validate(self):
"""Iterate over all triples in the graph and validate each one
appropriately
"""
log.info("{}\nValidating against {}"
.format("-" * 100, self.schema_def.__class__.__name__))
if not self.schema_def:
raise ValueError("No schema definition supplied.")
self.checked_attributes = []
# TODO - this should maybe choose the actually used namespace, not just
# the first one in the list
result = ValidationResult(self.allowed_namespaces[0],
self.schema_def.__class__.__name__)
for subject, predicate, object_ in self.graph:
log.info("\nsubj: {subj}\npred: {pred}\n obj: {obj}"
.format(subj=subject, pred=predicate,
obj=object_.encode('utf-8')))
result.add_error(self._check_triple((subject, predicate, object_)))
return result
def _should_ignore_predicate(self, predicate):
ignored_predicates = ['type', 'item', 'first', 'rest']
return self._field_name_from_uri(predicate) in ignored_predicates
def _check_triple(self, triple):
"""compare triple to ontology, return error or None"""
subj, pred, obj = triple
if self._should_ignore_predicate(pred):
log.info("Ignoring triple with predicate '{}'"
.format(self._field_name_from_uri(pred)))
return
classes = []
log.warning("Possible member %s found" % pred)
pred = self._expand_qname(pred)
if self._namespace_from_uri(pred) not in self.allowed_namespaces:
log.info("Member %s does not use an allowed namespace", pred)
return
instanceof = self._is_instance((subj, pred, obj))
if type(instanceof) == rt.URIRef:
instanceof = self._expand_qname(instanceof)
if hasattr(self.schema_def, "attributes_by_class") and \
not self.schema_def.attributes_by_class:
log.info("Parsed ontology not found. Parsing...")
self.schema_def.parse_ontology()
class_invalid = self._validate_class(instanceof)
if class_invalid:
log.warning("Invalid class %s" % instanceof)
return class_invalid
# TODO - the above sometimes fails when a single object has more than
# one rdfa type (eg <span property="schema:creator rnews:creator"
# typeof="schema:Person rnews:Person">
# Graph chooses the type in an arbitrary order, so it's unreliable
# eg: http://semanticweb.com/the-impact-of-rdfa_b35003
classes = self._superclasses_for_subject(self.graph, instanceof)
classes.append(instanceof)
member_invalid = self._validate_member(pred, classes, instanceof)
if member_invalid:
log.warning("Invalid member of class")
return member_invalid
dupe_invalid = self._validate_duplication((subj, pred), instanceof)
if dupe_invalid:
log.warning("Duplication found")
return dupe_invalid
# collect a list of checked attributes
self.checked_attributes.append((subj, pred))
log.warning("successfully validated triple, no errors")
return
def _validate_class(self, cl):
"""return error if class `cl` is not found in the ontology"""
if cl not in self.schema_def.attributes_by_class:
search_string = self._build_search_string(cl)
err = self.err(
"{0} - invalid class", self._field_name_from_uri(cl),
search_string=search_string)
return ValidationWarning(ValidationResult.ERROR, err['err'],
err['line'], err['num'])
def _get_stripped_attributes(self, member, classes):
stripped = []
if member not in self.stripped_attribute_names:
for cl in classes:
stripped.extend(
[self._field_name_from_uri(attr)
for attr in self.schema_def.attributes_by_class[cl]])
self.stripped_attribute_names[member] = stripped
else:
stripped = self.stripped_attribute_names[member]
return stripped
def _validate_member(self, member, classes, instanceof):
"""return error if `member` is not a member of any class in
`classes`
"""
log.info("Validating member %s" % member)
stripped = self._get_stripped_attributes(member, classes)
if self._field_name_from_uri(member) in stripped:
all_class_members = sum([self.schema_def.attributes_by_class[cl]
for cl in classes], [])
if member in all_class_members:
return
if self._namespace_from_uri(member) in self.allowed_namespaces:
err = self.err("Unoficially allowed namespace {0}",
self._namespace_from_uri(member))
return ValidationWarning(ValidationResult.WARNING, err['err'],
err['line'], err['num'])
else:
err = self.err("{0} - invalid member of {1}",
self._field_name_from_uri(member),
self._field_name_from_uri(instanceof))
return ValidationWarning(ValidationResult.ERROR, err['err'],
err['line'], err['num'])
def _superclasses_for_subject(self, graph, typeof):
"""helper, returns a list of all superclasses of a given class"""
# TODO - this might be replacing a fairly simple graph API query where
# it doesn't need to
classes = []
superclass = typeof
while True:
found = False
for p, o in self.schema_def.ontology[superclass]:
if self.schema_def.lexicon['subclass'] == str(p):
found = True
classes.append(o)
superclass = o
if not found:
break
return classes
def _is_instance(self, triple):
"""helper, returns the class type of subj"""
subj, pred, obj = triple
input_pred_ns = self._namespace_from_uri(self._expand_qname(pred))
triples = self.graph.triples(
(subj, rt.URIRef(self.schema_def.lexicon['type']), None)
)
if triples:
for tr in triples:
triple_obj_ns = self._namespace_from_uri(
self._expand_qname(tr[2]))
if input_pred_ns == triple_obj_ns: # match namespaces
return tr[2] # return the object
def _field_name_from_uri(self, uri):
"""helper, returns the name of an attribute (without namespace prefix)
"""
# TODO - should use graph API
uri = str(uri)
parts = uri.split('#')
if len(parts) == 1:
return uri.split('/')[-1] or uri
return parts[-1]
def _namespace_from_uri(self, uri):
"""returns the expanded namespace prefix of a uri"""
# TODO - this could be helped a bunch with proper use of the graph API
# it seems a bit fragile to treat these as simple string-splits
uri = str(uri)
parts = uri.split('#')
if len(parts) == 1:
return "%s/" % '/'.join(uri.split('/')[:-1])
return "%s#" % '#'.join(parts[:-1])
def _expand_qname(self, qname):
"""expand a qualified name's namespace prefix to include the resolved
namespace root url"""
if type(qname) is not rt.URIRef:
raise TypeError("Cannot expand qname of type {}, must be URIRef"
.format(type(qname)))
for ns in self.graph.namespaces():
if ns[0] == qname.split(':')[0]:
return rt.URIRef("%s%s" % (ns[1], qname.split(':')[-1]))
return qname
def _build_search_string(self, uri):
return self._field_name_from_uri(uri)
|
Parsely/schemato | schemato/validator.py | SchemaValidator._superclasses_for_subject | python | def _superclasses_for_subject(self, graph, typeof):
# TODO - this might be replacing a fairly simple graph API query where
# it doesn't need to
classes = []
superclass = typeof
while True:
found = False
for p, o in self.schema_def.ontology[superclass]:
if self.schema_def.lexicon['subclass'] == str(p):
found = True
classes.append(o)
superclass = o
if not found:
break
return classes | helper, returns a list of all superclasses of a given class | train | https://github.com/Parsely/schemato/blob/7002316fbcd52f2e669f8372bf1338c572e3df4b/schemato/validator.py#L174-L189 | null | class SchemaValidator(object):
"""ASSUMPTIONS:
This class knows about the file being validated, but it recieves that
file as a graph and a doc_lines
It does not perform any parsing logic on the file
It recieves a "validatable" graph object and returns errors"""
def __init__(self, graph, doc_lines, url=""):
super(SchemaValidator, self).__init__()
self.schema_def = None
self.allowed_namespaces = []
self.graph = graph
self.checked_attributes = []
self.stripped_attribute_names = {}
log.info("init validator: %s" % self.__class__.__name__)
self.doc_lines = doc_lines
self.err = partial(_error, doc_lines=self.doc_lines)
    def validate(self):
        """Iterate over all triples in the graph and validate each one
        appropriately, returning a ValidationResult that aggregates any
        errors or warnings found.

        Raises ValueError when no schema definition has been configured.
        """
        # Banner makes successive validation runs easy to separate in logs.
        log.info("{}\nValidating against {}"
                 .format("-" * 100, self.schema_def.__class__.__name__))
        if not self.schema_def:
            raise ValueError("No schema definition supplied.")
        # Reset duplicate-detection state so repeated validate() calls on the
        # same instance do not report stale duplicates.
        self.checked_attributes = []
        # TODO - this should maybe choose the actually used namespace, not just
        # the first one in the list
        result = ValidationResult(self.allowed_namespaces[0],
                                  self.schema_def.__class__.__name__)
        for subject, predicate, object_ in self.graph:
            log.info("\nsubj: {subj}\npred: {pred}\n obj: {obj}"
                     .format(subj=subject, pred=predicate,
                             obj=object_.encode('utf-8')))
            # _check_triple returns None for valid triples; presumably
            # add_error ignores None — TODO confirm in ValidationResult.
            result.add_error(self._check_triple((subject, predicate, object_)))
        return result
def _should_ignore_predicate(self, predicate):
ignored_predicates = ['type', 'item', 'first', 'rest']
return self._field_name_from_uri(predicate) in ignored_predicates
    def _check_triple(self, triple):
        """Compare a single (subj, pred, obj) triple to the ontology.

        Returns a ValidationWarning describing the first problem found
        (invalid class, invalid member, or duplication), or None when the
        triple is valid or deliberately skipped.
        """
        subj, pred, obj = triple
        # RDF plumbing predicates carry no schema data; skip them outright.
        if self._should_ignore_predicate(pred):
            log.info("Ignoring triple with predicate '{}'"
                     .format(self._field_name_from_uri(pred)))
            return
        # NOTE(review): this initial value is dead — `classes` is
        # unconditionally reassigned below before first use.
        classes = []
        log.warning("Possible member %s found" % pred)
        pred = self._expand_qname(pred)
        # Triples outside the validator's namespaces are out of scope.
        if self._namespace_from_uri(pred) not in self.allowed_namespaces:
            log.info("Member %s does not use an allowed namespace", pred)
            return
        # Resolve the rdf:type of the subject whose namespace matches pred's.
        instanceof = self._is_instance((subj, pred, obj))
        # NOTE(review): exact type comparison; isinstance may be intended.
        if type(instanceof) == rt.URIRef:
            instanceof = self._expand_qname(instanceof)
        # Lazily parse the ontology on the first triple checked.
        if hasattr(self.schema_def, "attributes_by_class") and \
                not self.schema_def.attributes_by_class:
            log.info("Parsed ontology not found. Parsing...")
            self.schema_def.parse_ontology()
        class_invalid = self._validate_class(instanceof)
        if class_invalid:
            log.warning("Invalid class %s" % instanceof)
            return class_invalid
        # TODO - the above sometimes fails when a single object has more than
        # one rdfa type (eg <span property="schema:creator rnews:creator"
        # typeof="schema:Person rnews:Person">
        # Graph chooses the type in an arbitrary order, so it's unreliable
        # eg: http://semanticweb.com/the-impact-of-rdfa_b35003
        # A member is valid if declared on the class or any of its ancestors.
        classes = self._superclasses_for_subject(self.graph, instanceof)
        classes.append(instanceof)
        member_invalid = self._validate_member(pred, classes, instanceof)
        if member_invalid:
            log.warning("Invalid member of class")
            return member_invalid
        dupe_invalid = self._validate_duplication((subj, pred), instanceof)
        if dupe_invalid:
            log.warning("Duplication found")
            return dupe_invalid
        # collect a list of checked attributes
        self.checked_attributes.append((subj, pred))
        log.warning("successfully validated triple, no errors")
        return
    def _validate_class(self, cl):
        """Return a ValidationWarning (ERROR severity) if class `cl` is not
        found in the ontology; implicitly return None for a known class."""
        if cl not in self.schema_def.attributes_by_class:
            # Include a search hint so users can look up the offending class.
            search_string = self._build_search_string(cl)
            err = self.err(
                "{0} - invalid class", self._field_name_from_uri(cl),
                search_string=search_string)
            return ValidationWarning(ValidationResult.ERROR, err['err'],
                                     err['line'], err['num'])
def _get_stripped_attributes(self, member, classes):
stripped = []
if member not in self.stripped_attribute_names:
for cl in classes:
stripped.extend(
[self._field_name_from_uri(attr)
for attr in self.schema_def.attributes_by_class[cl]])
self.stripped_attribute_names[member] = stripped
else:
stripped = self.stripped_attribute_names[member]
return stripped
def _validate_member(self, member, classes, instanceof):
"""return error if `member` is not a member of any class in
`classes`
"""
log.info("Validating member %s" % member)
stripped = self._get_stripped_attributes(member, classes)
if self._field_name_from_uri(member) in stripped:
all_class_members = sum([self.schema_def.attributes_by_class[cl]
for cl in classes], [])
if member in all_class_members:
return
if self._namespace_from_uri(member) in self.allowed_namespaces:
err = self.err("Unoficially allowed namespace {0}",
self._namespace_from_uri(member))
return ValidationWarning(ValidationResult.WARNING, err['err'],
err['line'], err['num'])
else:
err = self.err("{0} - invalid member of {1}",
self._field_name_from_uri(member),
self._field_name_from_uri(instanceof))
return ValidationWarning(ValidationResult.ERROR, err['err'],
err['line'], err['num'])
    def _validate_duplication(self, subj_and_pred, cl):
        """Return a ValidationWarning (WARNING severity) if the (subj, pred)
        pair has already been validated in this run; None otherwise."""
        subj, pred = subj_and_pred
        log.info("Validating duplication of member %s" % pred)
        # checked_attributes accumulates every validated (subject, predicate)
        # pair; a repeat is reported as a soft warning, not an error.
        if (subj, pred) in self.checked_attributes:
            err = self.err("{0} - duplicated member of {1}",
                           self._field_name_from_uri(pred),
                           self._field_name_from_uri(cl))
            return ValidationWarning(ValidationResult.WARNING, err['err'],
                                     err['line'], err['num'])
def _is_instance(self, triple):
"""helper, returns the class type of subj"""
subj, pred, obj = triple
input_pred_ns = self._namespace_from_uri(self._expand_qname(pred))
triples = self.graph.triples(
(subj, rt.URIRef(self.schema_def.lexicon['type']), None)
)
if triples:
for tr in triples:
triple_obj_ns = self._namespace_from_uri(
self._expand_qname(tr[2]))
if input_pred_ns == triple_obj_ns: # match namespaces
return tr[2] # return the object
def _field_name_from_uri(self, uri):
"""helper, returns the name of an attribute (without namespace prefix)
"""
# TODO - should use graph API
uri = str(uri)
parts = uri.split('#')
if len(parts) == 1:
return uri.split('/')[-1] or uri
return parts[-1]
def _namespace_from_uri(self, uri):
"""returns the expanded namespace prefix of a uri"""
# TODO - this could be helped a bunch with proper use of the graph API
# it seems a bit fragile to treat these as simple string-splits
uri = str(uri)
parts = uri.split('#')
if len(parts) == 1:
return "%s/" % '/'.join(uri.split('/')[:-1])
return "%s#" % '#'.join(parts[:-1])
    def _expand_qname(self, qname):
        """expand a qualified name's namespace prefix to include the resolved
        namespace root url"""
        # NOTE(review): exact type check also rejects URIRef subclasses;
        # isinstance may be intended — confirm before changing.
        if type(qname) is not rt.URIRef:
            raise TypeError("Cannot expand qname of type {}, must be URIRef"
                            .format(type(qname)))
        # graph.namespaces() yields (prefix, root-uri) pairs; match on prefix.
        for ns in self.graph.namespaces():
            if ns[0] == qname.split(':')[0]:
                return rt.URIRef("%s%s" % (ns[1], qname.split(':')[-1]))
        # No matching prefix: assume the name is already fully expanded.
        return qname
    def _build_search_string(self, uri):
        """Return a human-friendly search hint for `uri` (the bare field
        name, namespace stripped)."""
        return self._field_name_from_uri(uri)
|
Parsely/schemato | schemato/validator.py | SchemaValidator._is_instance | python | def _is_instance(self, triple):
subj, pred, obj = triple
input_pred_ns = self._namespace_from_uri(self._expand_qname(pred))
triples = self.graph.triples(
(subj, rt.URIRef(self.schema_def.lexicon['type']), None)
)
if triples:
for tr in triples:
triple_obj_ns = self._namespace_from_uri(
self._expand_qname(tr[2]))
if input_pred_ns == triple_obj_ns: # match namespaces
return tr[2] | helper, returns the class type of subj | train | https://github.com/Parsely/schemato/blob/7002316fbcd52f2e669f8372bf1338c572e3df4b/schemato/validator.py#L191-L203 | null | class SchemaValidator(object):
"""ASSUMPTIONS:
This class knows about the file being validated, but it recieves that
file as a graph and a doc_lines
It does not perform any parsing logic on the file
It recieves a "validatable" graph object and returns errors"""
def __init__(self, graph, doc_lines, url=""):
super(SchemaValidator, self).__init__()
self.schema_def = None
self.allowed_namespaces = []
self.graph = graph
self.checked_attributes = []
self.stripped_attribute_names = {}
log.info("init validator: %s" % self.__class__.__name__)
self.doc_lines = doc_lines
self.err = partial(_error, doc_lines=self.doc_lines)
def validate(self):
"""Iterate over all triples in the graph and validate each one
appropriately
"""
log.info("{}\nValidating against {}"
.format("-" * 100, self.schema_def.__class__.__name__))
if not self.schema_def:
raise ValueError("No schema definition supplied.")
self.checked_attributes = []
# TODO - this should maybe choose the actually used namespace, not just
# the first one in the list
result = ValidationResult(self.allowed_namespaces[0],
self.schema_def.__class__.__name__)
for subject, predicate, object_ in self.graph:
log.info("\nsubj: {subj}\npred: {pred}\n obj: {obj}"
.format(subj=subject, pred=predicate,
obj=object_.encode('utf-8')))
result.add_error(self._check_triple((subject, predicate, object_)))
return result
def _should_ignore_predicate(self, predicate):
ignored_predicates = ['type', 'item', 'first', 'rest']
return self._field_name_from_uri(predicate) in ignored_predicates
def _check_triple(self, triple):
"""compare triple to ontology, return error or None"""
subj, pred, obj = triple
if self._should_ignore_predicate(pred):
log.info("Ignoring triple with predicate '{}'"
.format(self._field_name_from_uri(pred)))
return
classes = []
log.warning("Possible member %s found" % pred)
pred = self._expand_qname(pred)
if self._namespace_from_uri(pred) not in self.allowed_namespaces:
log.info("Member %s does not use an allowed namespace", pred)
return
instanceof = self._is_instance((subj, pred, obj))
if type(instanceof) == rt.URIRef:
instanceof = self._expand_qname(instanceof)
if hasattr(self.schema_def, "attributes_by_class") and \
not self.schema_def.attributes_by_class:
log.info("Parsed ontology not found. Parsing...")
self.schema_def.parse_ontology()
class_invalid = self._validate_class(instanceof)
if class_invalid:
log.warning("Invalid class %s" % instanceof)
return class_invalid
# TODO - the above sometimes fails when a single object has more than
# one rdfa type (eg <span property="schema:creator rnews:creator"
# typeof="schema:Person rnews:Person">
# Graph chooses the type in an arbitrary order, so it's unreliable
# eg: http://semanticweb.com/the-impact-of-rdfa_b35003
classes = self._superclasses_for_subject(self.graph, instanceof)
classes.append(instanceof)
member_invalid = self._validate_member(pred, classes, instanceof)
if member_invalid:
log.warning("Invalid member of class")
return member_invalid
dupe_invalid = self._validate_duplication((subj, pred), instanceof)
if dupe_invalid:
log.warning("Duplication found")
return dupe_invalid
# collect a list of checked attributes
self.checked_attributes.append((subj, pred))
log.warning("successfully validated triple, no errors")
return
def _validate_class(self, cl):
"""return error if class `cl` is not found in the ontology"""
if cl not in self.schema_def.attributes_by_class:
search_string = self._build_search_string(cl)
err = self.err(
"{0} - invalid class", self._field_name_from_uri(cl),
search_string=search_string)
return ValidationWarning(ValidationResult.ERROR, err['err'],
err['line'], err['num'])
def _get_stripped_attributes(self, member, classes):
stripped = []
if member not in self.stripped_attribute_names:
for cl in classes:
stripped.extend(
[self._field_name_from_uri(attr)
for attr in self.schema_def.attributes_by_class[cl]])
self.stripped_attribute_names[member] = stripped
else:
stripped = self.stripped_attribute_names[member]
return stripped
def _validate_member(self, member, classes, instanceof):
"""return error if `member` is not a member of any class in
`classes`
"""
log.info("Validating member %s" % member)
stripped = self._get_stripped_attributes(member, classes)
if self._field_name_from_uri(member) in stripped:
all_class_members = sum([self.schema_def.attributes_by_class[cl]
for cl in classes], [])
if member in all_class_members:
return
if self._namespace_from_uri(member) in self.allowed_namespaces:
err = self.err("Unoficially allowed namespace {0}",
self._namespace_from_uri(member))
return ValidationWarning(ValidationResult.WARNING, err['err'],
err['line'], err['num'])
else:
err = self.err("{0} - invalid member of {1}",
self._field_name_from_uri(member),
self._field_name_from_uri(instanceof))
return ValidationWarning(ValidationResult.ERROR, err['err'],
err['line'], err['num'])
def _validate_duplication(self, subj_and_pred, cl):
"""returns error if we've already seen the member `pred` on `subj`"""
subj, pred = subj_and_pred
log.info("Validating duplication of member %s" % pred)
if (subj, pred) in self.checked_attributes:
err = self.err("{0} - duplicated member of {1}",
self._field_name_from_uri(pred),
self._field_name_from_uri(cl))
return ValidationWarning(ValidationResult.WARNING, err['err'],
err['line'], err['num'])
def _superclasses_for_subject(self, graph, typeof):
"""helper, returns a list of all superclasses of a given class"""
# TODO - this might be replacing a fairly simple graph API query where
# it doesn't need to
classes = []
superclass = typeof
while True:
found = False
for p, o in self.schema_def.ontology[superclass]:
if self.schema_def.lexicon['subclass'] == str(p):
found = True
classes.append(o)
superclass = o
if not found:
break
return classes
# return the object
def _field_name_from_uri(self, uri):
"""helper, returns the name of an attribute (without namespace prefix)
"""
# TODO - should use graph API
uri = str(uri)
parts = uri.split('#')
if len(parts) == 1:
return uri.split('/')[-1] or uri
return parts[-1]
def _namespace_from_uri(self, uri):
"""returns the expanded namespace prefix of a uri"""
# TODO - this could be helped a bunch with proper use of the graph API
# it seems a bit fragile to treat these as simple string-splits
uri = str(uri)
parts = uri.split('#')
if len(parts) == 1:
return "%s/" % '/'.join(uri.split('/')[:-1])
return "%s#" % '#'.join(parts[:-1])
def _expand_qname(self, qname):
"""expand a qualified name's namespace prefix to include the resolved
namespace root url"""
if type(qname) is not rt.URIRef:
raise TypeError("Cannot expand qname of type {}, must be URIRef"
.format(type(qname)))
for ns in self.graph.namespaces():
if ns[0] == qname.split(':')[0]:
return rt.URIRef("%s%s" % (ns[1], qname.split(':')[-1]))
return qname
def _build_search_string(self, uri):
return self._field_name_from_uri(uri)
|
Parsely/schemato | schemato/validator.py | SchemaValidator._field_name_from_uri | python | def _field_name_from_uri(self, uri):
# TODO - should use graph API
uri = str(uri)
parts = uri.split('#')
if len(parts) == 1:
return uri.split('/')[-1] or uri
return parts[-1] | helper, returns the name of an attribute (without namespace prefix) | train | https://github.com/Parsely/schemato/blob/7002316fbcd52f2e669f8372bf1338c572e3df4b/schemato/validator.py#L205-L213 | null | class SchemaValidator(object):
"""ASSUMPTIONS:
This class knows about the file being validated, but it recieves that
file as a graph and a doc_lines
It does not perform any parsing logic on the file
It recieves a "validatable" graph object and returns errors"""
def __init__(self, graph, doc_lines, url=""):
super(SchemaValidator, self).__init__()
self.schema_def = None
self.allowed_namespaces = []
self.graph = graph
self.checked_attributes = []
self.stripped_attribute_names = {}
log.info("init validator: %s" % self.__class__.__name__)
self.doc_lines = doc_lines
self.err = partial(_error, doc_lines=self.doc_lines)
def validate(self):
"""Iterate over all triples in the graph and validate each one
appropriately
"""
log.info("{}\nValidating against {}"
.format("-" * 100, self.schema_def.__class__.__name__))
if not self.schema_def:
raise ValueError("No schema definition supplied.")
self.checked_attributes = []
# TODO - this should maybe choose the actually used namespace, not just
# the first one in the list
result = ValidationResult(self.allowed_namespaces[0],
self.schema_def.__class__.__name__)
for subject, predicate, object_ in self.graph:
log.info("\nsubj: {subj}\npred: {pred}\n obj: {obj}"
.format(subj=subject, pred=predicate,
obj=object_.encode('utf-8')))
result.add_error(self._check_triple((subject, predicate, object_)))
return result
def _should_ignore_predicate(self, predicate):
ignored_predicates = ['type', 'item', 'first', 'rest']
return self._field_name_from_uri(predicate) in ignored_predicates
def _check_triple(self, triple):
"""compare triple to ontology, return error or None"""
subj, pred, obj = triple
if self._should_ignore_predicate(pred):
log.info("Ignoring triple with predicate '{}'"
.format(self._field_name_from_uri(pred)))
return
classes = []
log.warning("Possible member %s found" % pred)
pred = self._expand_qname(pred)
if self._namespace_from_uri(pred) not in self.allowed_namespaces:
log.info("Member %s does not use an allowed namespace", pred)
return
instanceof = self._is_instance((subj, pred, obj))
if type(instanceof) == rt.URIRef:
instanceof = self._expand_qname(instanceof)
if hasattr(self.schema_def, "attributes_by_class") and \
not self.schema_def.attributes_by_class:
log.info("Parsed ontology not found. Parsing...")
self.schema_def.parse_ontology()
class_invalid = self._validate_class(instanceof)
if class_invalid:
log.warning("Invalid class %s" % instanceof)
return class_invalid
# TODO - the above sometimes fails when a single object has more than
# one rdfa type (eg <span property="schema:creator rnews:creator"
# typeof="schema:Person rnews:Person">
# Graph chooses the type in an arbitrary order, so it's unreliable
# eg: http://semanticweb.com/the-impact-of-rdfa_b35003
classes = self._superclasses_for_subject(self.graph, instanceof)
classes.append(instanceof)
member_invalid = self._validate_member(pred, classes, instanceof)
if member_invalid:
log.warning("Invalid member of class")
return member_invalid
dupe_invalid = self._validate_duplication((subj, pred), instanceof)
if dupe_invalid:
log.warning("Duplication found")
return dupe_invalid
# collect a list of checked attributes
self.checked_attributes.append((subj, pred))
log.warning("successfully validated triple, no errors")
return
def _validate_class(self, cl):
"""return error if class `cl` is not found in the ontology"""
if cl not in self.schema_def.attributes_by_class:
search_string = self._build_search_string(cl)
err = self.err(
"{0} - invalid class", self._field_name_from_uri(cl),
search_string=search_string)
return ValidationWarning(ValidationResult.ERROR, err['err'],
err['line'], err['num'])
def _get_stripped_attributes(self, member, classes):
stripped = []
if member not in self.stripped_attribute_names:
for cl in classes:
stripped.extend(
[self._field_name_from_uri(attr)
for attr in self.schema_def.attributes_by_class[cl]])
self.stripped_attribute_names[member] = stripped
else:
stripped = self.stripped_attribute_names[member]
return stripped
def _validate_member(self, member, classes, instanceof):
"""return error if `member` is not a member of any class in
`classes`
"""
log.info("Validating member %s" % member)
stripped = self._get_stripped_attributes(member, classes)
if self._field_name_from_uri(member) in stripped:
all_class_members = sum([self.schema_def.attributes_by_class[cl]
for cl in classes], [])
if member in all_class_members:
return
if self._namespace_from_uri(member) in self.allowed_namespaces:
err = self.err("Unoficially allowed namespace {0}",
self._namespace_from_uri(member))
return ValidationWarning(ValidationResult.WARNING, err['err'],
err['line'], err['num'])
else:
err = self.err("{0} - invalid member of {1}",
self._field_name_from_uri(member),
self._field_name_from_uri(instanceof))
return ValidationWarning(ValidationResult.ERROR, err['err'],
err['line'], err['num'])
def _validate_duplication(self, subj_and_pred, cl):
"""returns error if we've already seen the member `pred` on `subj`"""
subj, pred = subj_and_pred
log.info("Validating duplication of member %s" % pred)
if (subj, pred) in self.checked_attributes:
err = self.err("{0} - duplicated member of {1}",
self._field_name_from_uri(pred),
self._field_name_from_uri(cl))
return ValidationWarning(ValidationResult.WARNING, err['err'],
err['line'], err['num'])
def _superclasses_for_subject(self, graph, typeof):
"""helper, returns a list of all superclasses of a given class"""
# TODO - this might be replacing a fairly simple graph API query where
# it doesn't need to
classes = []
superclass = typeof
while True:
found = False
for p, o in self.schema_def.ontology[superclass]:
if self.schema_def.lexicon['subclass'] == str(p):
found = True
classes.append(o)
superclass = o
if not found:
break
return classes
def _is_instance(self, triple):
"""helper, returns the class type of subj"""
subj, pred, obj = triple
input_pred_ns = self._namespace_from_uri(self._expand_qname(pred))
triples = self.graph.triples(
(subj, rt.URIRef(self.schema_def.lexicon['type']), None)
)
if triples:
for tr in triples:
triple_obj_ns = self._namespace_from_uri(
self._expand_qname(tr[2]))
if input_pred_ns == triple_obj_ns: # match namespaces
return tr[2] # return the object
def _namespace_from_uri(self, uri):
"""returns the expanded namespace prefix of a uri"""
# TODO - this could be helped a bunch with proper use of the graph API
# it seems a bit fragile to treat these as simple string-splits
uri = str(uri)
parts = uri.split('#')
if len(parts) == 1:
return "%s/" % '/'.join(uri.split('/')[:-1])
return "%s#" % '#'.join(parts[:-1])
def _expand_qname(self, qname):
"""expand a qualified name's namespace prefix to include the resolved
namespace root url"""
if type(qname) is not rt.URIRef:
raise TypeError("Cannot expand qname of type {}, must be URIRef"
.format(type(qname)))
for ns in self.graph.namespaces():
if ns[0] == qname.split(':')[0]:
return rt.URIRef("%s%s" % (ns[1], qname.split(':')[-1]))
return qname
def _build_search_string(self, uri):
return self._field_name_from_uri(uri)
|
Parsely/schemato | schemato/validator.py | SchemaValidator._namespace_from_uri | python | def _namespace_from_uri(self, uri):
# TODO - this could be helped a bunch with proper use of the graph API
# it seems a bit fragile to treat these as simple string-splits
uri = str(uri)
parts = uri.split('#')
if len(parts) == 1:
return "%s/" % '/'.join(uri.split('/')[:-1])
return "%s#" % '#'.join(parts[:-1]) | returns the expanded namespace prefix of a uri | train | https://github.com/Parsely/schemato/blob/7002316fbcd52f2e669f8372bf1338c572e3df4b/schemato/validator.py#L215-L223 | null | class SchemaValidator(object):
"""ASSUMPTIONS:
This class knows about the file being validated, but it recieves that
file as a graph and a doc_lines
It does not perform any parsing logic on the file
It recieves a "validatable" graph object and returns errors"""
def __init__(self, graph, doc_lines, url=""):
super(SchemaValidator, self).__init__()
self.schema_def = None
self.allowed_namespaces = []
self.graph = graph
self.checked_attributes = []
self.stripped_attribute_names = {}
log.info("init validator: %s" % self.__class__.__name__)
self.doc_lines = doc_lines
self.err = partial(_error, doc_lines=self.doc_lines)
def validate(self):
"""Iterate over all triples in the graph and validate each one
appropriately
"""
log.info("{}\nValidating against {}"
.format("-" * 100, self.schema_def.__class__.__name__))
if not self.schema_def:
raise ValueError("No schema definition supplied.")
self.checked_attributes = []
# TODO - this should maybe choose the actually used namespace, not just
# the first one in the list
result = ValidationResult(self.allowed_namespaces[0],
self.schema_def.__class__.__name__)
for subject, predicate, object_ in self.graph:
log.info("\nsubj: {subj}\npred: {pred}\n obj: {obj}"
.format(subj=subject, pred=predicate,
obj=object_.encode('utf-8')))
result.add_error(self._check_triple((subject, predicate, object_)))
return result
def _should_ignore_predicate(self, predicate):
ignored_predicates = ['type', 'item', 'first', 'rest']
return self._field_name_from_uri(predicate) in ignored_predicates
def _check_triple(self, triple):
"""compare triple to ontology, return error or None"""
subj, pred, obj = triple
if self._should_ignore_predicate(pred):
log.info("Ignoring triple with predicate '{}'"
.format(self._field_name_from_uri(pred)))
return
classes = []
log.warning("Possible member %s found" % pred)
pred = self._expand_qname(pred)
if self._namespace_from_uri(pred) not in self.allowed_namespaces:
log.info("Member %s does not use an allowed namespace", pred)
return
instanceof = self._is_instance((subj, pred, obj))
if type(instanceof) == rt.URIRef:
instanceof = self._expand_qname(instanceof)
if hasattr(self.schema_def, "attributes_by_class") and \
not self.schema_def.attributes_by_class:
log.info("Parsed ontology not found. Parsing...")
self.schema_def.parse_ontology()
class_invalid = self._validate_class(instanceof)
if class_invalid:
log.warning("Invalid class %s" % instanceof)
return class_invalid
# TODO - the above sometimes fails when a single object has more than
# one rdfa type (eg <span property="schema:creator rnews:creator"
# typeof="schema:Person rnews:Person">
# Graph chooses the type in an arbitrary order, so it's unreliable
# eg: http://semanticweb.com/the-impact-of-rdfa_b35003
classes = self._superclasses_for_subject(self.graph, instanceof)
classes.append(instanceof)
member_invalid = self._validate_member(pred, classes, instanceof)
if member_invalid:
log.warning("Invalid member of class")
return member_invalid
dupe_invalid = self._validate_duplication((subj, pred), instanceof)
if dupe_invalid:
log.warning("Duplication found")
return dupe_invalid
# collect a list of checked attributes
self.checked_attributes.append((subj, pred))
log.warning("successfully validated triple, no errors")
return
def _validate_class(self, cl):
"""return error if class `cl` is not found in the ontology"""
if cl not in self.schema_def.attributes_by_class:
search_string = self._build_search_string(cl)
err = self.err(
"{0} - invalid class", self._field_name_from_uri(cl),
search_string=search_string)
return ValidationWarning(ValidationResult.ERROR, err['err'],
err['line'], err['num'])
def _get_stripped_attributes(self, member, classes):
stripped = []
if member not in self.stripped_attribute_names:
for cl in classes:
stripped.extend(
[self._field_name_from_uri(attr)
for attr in self.schema_def.attributes_by_class[cl]])
self.stripped_attribute_names[member] = stripped
else:
stripped = self.stripped_attribute_names[member]
return stripped
def _validate_member(self, member, classes, instanceof):
"""return error if `member` is not a member of any class in
`classes`
"""
log.info("Validating member %s" % member)
stripped = self._get_stripped_attributes(member, classes)
if self._field_name_from_uri(member) in stripped:
all_class_members = sum([self.schema_def.attributes_by_class[cl]
for cl in classes], [])
if member in all_class_members:
return
if self._namespace_from_uri(member) in self.allowed_namespaces:
err = self.err("Unoficially allowed namespace {0}",
self._namespace_from_uri(member))
return ValidationWarning(ValidationResult.WARNING, err['err'],
err['line'], err['num'])
else:
err = self.err("{0} - invalid member of {1}",
self._field_name_from_uri(member),
self._field_name_from_uri(instanceof))
return ValidationWarning(ValidationResult.ERROR, err['err'],
err['line'], err['num'])
def _validate_duplication(self, subj_and_pred, cl):
"""returns error if we've already seen the member `pred` on `subj`"""
subj, pred = subj_and_pred
log.info("Validating duplication of member %s" % pred)
if (subj, pred) in self.checked_attributes:
err = self.err("{0} - duplicated member of {1}",
self._field_name_from_uri(pred),
self._field_name_from_uri(cl))
return ValidationWarning(ValidationResult.WARNING, err['err'],
err['line'], err['num'])
def _superclasses_for_subject(self, graph, typeof):
"""helper, returns a list of all superclasses of a given class"""
# TODO - this might be replacing a fairly simple graph API query where
# it doesn't need to
classes = []
superclass = typeof
while True:
found = False
for p, o in self.schema_def.ontology[superclass]:
if self.schema_def.lexicon['subclass'] == str(p):
found = True
classes.append(o)
superclass = o
if not found:
break
return classes
def _is_instance(self, triple):
"""helper, returns the class type of subj"""
subj, pred, obj = triple
input_pred_ns = self._namespace_from_uri(self._expand_qname(pred))
triples = self.graph.triples(
(subj, rt.URIRef(self.schema_def.lexicon['type']), None)
)
if triples:
for tr in triples:
triple_obj_ns = self._namespace_from_uri(
self._expand_qname(tr[2]))
if input_pred_ns == triple_obj_ns: # match namespaces
return tr[2] # return the object
def _field_name_from_uri(self, uri):
"""helper, returns the name of an attribute (without namespace prefix)
"""
# TODO - should use graph API
uri = str(uri)
parts = uri.split('#')
if len(parts) == 1:
return uri.split('/')[-1] or uri
return parts[-1]
def _expand_qname(self, qname):
"""expand a qualified name's namespace prefix to include the resolved
namespace root url"""
if type(qname) is not rt.URIRef:
raise TypeError("Cannot expand qname of type {}, must be URIRef"
.format(type(qname)))
for ns in self.graph.namespaces():
if ns[0] == qname.split(':')[0]:
return rt.URIRef("%s%s" % (ns[1], qname.split(':')[-1]))
return qname
def _build_search_string(self, uri):
return self._field_name_from_uri(uri)
|
Parsely/schemato | schemato/validator.py | SchemaValidator._expand_qname | python | def _expand_qname(self, qname):
if type(qname) is not rt.URIRef:
raise TypeError("Cannot expand qname of type {}, must be URIRef"
.format(type(qname)))
for ns in self.graph.namespaces():
if ns[0] == qname.split(':')[0]:
return rt.URIRef("%s%s" % (ns[1], qname.split(':')[-1]))
return qname | expand a qualified name's namespace prefix to include the resolved
namespace root url | train | https://github.com/Parsely/schemato/blob/7002316fbcd52f2e669f8372bf1338c572e3df4b/schemato/validator.py#L225-L234 | null | class SchemaValidator(object):
"""ASSUMPTIONS:
This class knows about the file being validated, but it recieves that
file as a graph and a doc_lines
It does not perform any parsing logic on the file
It recieves a "validatable" graph object and returns errors"""
def __init__(self, graph, doc_lines, url=""):
super(SchemaValidator, self).__init__()
self.schema_def = None
self.allowed_namespaces = []
self.graph = graph
self.checked_attributes = []
self.stripped_attribute_names = {}
log.info("init validator: %s" % self.__class__.__name__)
self.doc_lines = doc_lines
self.err = partial(_error, doc_lines=self.doc_lines)
def validate(self):
"""Iterate over all triples in the graph and validate each one
appropriately
"""
log.info("{}\nValidating against {}"
.format("-" * 100, self.schema_def.__class__.__name__))
if not self.schema_def:
raise ValueError("No schema definition supplied.")
self.checked_attributes = []
# TODO - this should maybe choose the actually used namespace, not just
# the first one in the list
result = ValidationResult(self.allowed_namespaces[0],
self.schema_def.__class__.__name__)
for subject, predicate, object_ in self.graph:
log.info("\nsubj: {subj}\npred: {pred}\n obj: {obj}"
.format(subj=subject, pred=predicate,
obj=object_.encode('utf-8')))
result.add_error(self._check_triple((subject, predicate, object_)))
return result
def _should_ignore_predicate(self, predicate):
ignored_predicates = ['type', 'item', 'first', 'rest']
return self._field_name_from_uri(predicate) in ignored_predicates
def _check_triple(self, triple):
"""compare triple to ontology, return error or None"""
subj, pred, obj = triple
if self._should_ignore_predicate(pred):
log.info("Ignoring triple with predicate '{}'"
.format(self._field_name_from_uri(pred)))
return
classes = []
log.warning("Possible member %s found" % pred)
pred = self._expand_qname(pred)
if self._namespace_from_uri(pred) not in self.allowed_namespaces:
log.info("Member %s does not use an allowed namespace", pred)
return
instanceof = self._is_instance((subj, pred, obj))
if type(instanceof) == rt.URIRef:
instanceof = self._expand_qname(instanceof)
if hasattr(self.schema_def, "attributes_by_class") and \
not self.schema_def.attributes_by_class:
log.info("Parsed ontology not found. Parsing...")
self.schema_def.parse_ontology()
class_invalid = self._validate_class(instanceof)
if class_invalid:
log.warning("Invalid class %s" % instanceof)
return class_invalid
# TODO - the above sometimes fails when a single object has more than
# one rdfa type (eg <span property="schema:creator rnews:creator"
# typeof="schema:Person rnews:Person">
# Graph chooses the type in an arbitrary order, so it's unreliable
# eg: http://semanticweb.com/the-impact-of-rdfa_b35003
classes = self._superclasses_for_subject(self.graph, instanceof)
classes.append(instanceof)
member_invalid = self._validate_member(pred, classes, instanceof)
if member_invalid:
log.warning("Invalid member of class")
return member_invalid
dupe_invalid = self._validate_duplication((subj, pred), instanceof)
if dupe_invalid:
log.warning("Duplication found")
return dupe_invalid
# collect a list of checked attributes
self.checked_attributes.append((subj, pred))
log.warning("successfully validated triple, no errors")
return
def _validate_class(self, cl):
"""return error if class `cl` is not found in the ontology"""
if cl not in self.schema_def.attributes_by_class:
search_string = self._build_search_string(cl)
err = self.err(
"{0} - invalid class", self._field_name_from_uri(cl),
search_string=search_string)
return ValidationWarning(ValidationResult.ERROR, err['err'],
err['line'], err['num'])
def _get_stripped_attributes(self, member, classes):
stripped = []
if member not in self.stripped_attribute_names:
for cl in classes:
stripped.extend(
[self._field_name_from_uri(attr)
for attr in self.schema_def.attributes_by_class[cl]])
self.stripped_attribute_names[member] = stripped
else:
stripped = self.stripped_attribute_names[member]
return stripped
def _validate_member(self, member, classes, instanceof):
"""return error if `member` is not a member of any class in
`classes`
"""
log.info("Validating member %s" % member)
stripped = self._get_stripped_attributes(member, classes)
if self._field_name_from_uri(member) in stripped:
all_class_members = sum([self.schema_def.attributes_by_class[cl]
for cl in classes], [])
if member in all_class_members:
return
if self._namespace_from_uri(member) in self.allowed_namespaces:
err = self.err("Unoficially allowed namespace {0}",
self._namespace_from_uri(member))
return ValidationWarning(ValidationResult.WARNING, err['err'],
err['line'], err['num'])
else:
err = self.err("{0} - invalid member of {1}",
self._field_name_from_uri(member),
self._field_name_from_uri(instanceof))
return ValidationWarning(ValidationResult.ERROR, err['err'],
err['line'], err['num'])
def _validate_duplication(self, subj_and_pred, cl):
"""returns error if we've already seen the member `pred` on `subj`"""
subj, pred = subj_and_pred
log.info("Validating duplication of member %s" % pred)
if (subj, pred) in self.checked_attributes:
err = self.err("{0} - duplicated member of {1}",
self._field_name_from_uri(pred),
self._field_name_from_uri(cl))
return ValidationWarning(ValidationResult.WARNING, err['err'],
err['line'], err['num'])
def _superclasses_for_subject(self, graph, typeof):
"""helper, returns a list of all superclasses of a given class"""
# TODO - this might be replacing a fairly simple graph API query where
# it doesn't need to
classes = []
superclass = typeof
while True:
found = False
for p, o in self.schema_def.ontology[superclass]:
if self.schema_def.lexicon['subclass'] == str(p):
found = True
classes.append(o)
superclass = o
if not found:
break
return classes
def _is_instance(self, triple):
"""helper, returns the class type of subj"""
subj, pred, obj = triple
input_pred_ns = self._namespace_from_uri(self._expand_qname(pred))
triples = self.graph.triples(
(subj, rt.URIRef(self.schema_def.lexicon['type']), None)
)
if triples:
for tr in triples:
triple_obj_ns = self._namespace_from_uri(
self._expand_qname(tr[2]))
if input_pred_ns == triple_obj_ns: # match namespaces
return tr[2] # return the object
def _field_name_from_uri(self, uri):
"""helper, returns the name of an attribute (without namespace prefix)
"""
# TODO - should use graph API
uri = str(uri)
parts = uri.split('#')
if len(parts) == 1:
return uri.split('/')[-1] or uri
return parts[-1]
def _namespace_from_uri(self, uri):
"""returns the expanded namespace prefix of a uri"""
# TODO - this could be helped a bunch with proper use of the graph API
# it seems a bit fragile to treat these as simple string-splits
uri = str(uri)
parts = uri.split('#')
if len(parts) == 1:
return "%s/" % '/'.join(uri.split('/')[:-1])
return "%s#" % '#'.join(parts[:-1])
def _build_search_string(self, uri):
return self._field_name_from_uri(uri)
|
Parsely/schemato | schemato/schemas/parselypage.py | ParselyPageParser.original_unescape | python | def original_unescape(self, s):
if isinstance(s, basestring):
return unicode(HTMLParser.unescape(self, s))
elif isinstance(s, list):
return [unicode(HTMLParser.unescape(self, item)) for item in s]
else:
return s | Since we need to use this sometimes | train | https://github.com/Parsely/schemato/blob/7002316fbcd52f2e669f8372bf1338c572e3df4b/schemato/schemas/parselypage.py#L35-L42 | null | class ParselyPageParser(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
self.ppage = None
def handle_starttag(self, tag, attrs):
if self.ppage is not None or tag != "meta":
return
attrs = dict(attrs)
if attrs.get("name") == "parsely-page":
ppage = attrs.get("content", attrs.get("value"))
if ppage:
try:
self.ppage = json.loads(ppage)
except:
raise HTMLParseError("bad ppage") # bad ppage
def unescape(self, s):
return s
|
Parsely/schemato | schemato/schemas/parselypage.py | ParselyPageValidator.get_standard | python | def get_standard(self):
try:
res = urlopen(PARSELY_PAGE_SCHEMA)
except:
return []
text = res.read()
if isinstance(text, bytes):
text = text.decode('utf-8')
tree = etree.parse(StringIO(text))
stdref = tree.xpath("//div/@about")
return [a.split(':')[1] for a in stdref] | get list of allowed parameters | train | https://github.com/Parsely/schemato/blob/7002316fbcd52f2e669f8372bf1338c572e3df4b/schemato/schemas/parselypage.py#L56-L67 | null | class ParselyPageValidator(SchemaValidator):
def __init__(self, graph, doc_lines, url=""):
super(ParselyPageValidator, self).__init__(graph, doc_lines, url=url)
self.text = '\n'.join([a[0] for a in doc_lines])
self.stdref = self.get_standard()
self.url_validator = HttpUrl()
self.data = self._get_parselypage(self.text)
def _get_parselypage(self, body):
"""extract the parsely-page meta content from a page"""
parser = ParselyPageParser()
ret = None
try:
parser.feed(body)
except HTMLParseError:
pass # ignore and hope we got ppage
if parser.ppage is None:
return
ret = parser.ppage
if ret:
ret = {parser.original_unescape(k): parser.original_unescape(v)
for k, v in iteritems(ret)}
return ret
def validate(self):
result = ValidationResult("parsely-page", self.__class__.__name__)
if self.data:
for key in self.data:
res = self.check_key(key)
if res:
result.add_error(res)
return result
def check_key(self, key):
if key not in self.stdref:
err = _error("{0} - invalid parsely-page field", key,
doc_lines=self.doc_lines)
return ValidationWarning(
ValidationResult.ERROR, err['err'], err['line'], err['num'])
if key in ["link", "image_url"]:
if not self.url_validator(self.data[key]):
err = _error(
"{0} - invalid url for field '{1}'", self.data[key], key,
doc_lines=self.doc_lines)
return ValidationWarning(
ValidationResult.ERROR, err['err'], err['line'],
err['num'])
return None
|
Parsely/schemato | schemato/schemas/parselypage.py | ParselyPageValidator._get_parselypage | python | def _get_parselypage(self, body):
parser = ParselyPageParser()
ret = None
try:
parser.feed(body)
except HTMLParseError:
pass # ignore and hope we got ppage
if parser.ppage is None:
return
ret = parser.ppage
if ret:
ret = {parser.original_unescape(k): parser.original_unescape(v)
for k, v in iteritems(ret)}
return ret | extract the parsely-page meta content from a page | train | https://github.com/Parsely/schemato/blob/7002316fbcd52f2e669f8372bf1338c572e3df4b/schemato/schemas/parselypage.py#L69-L84 | null | class ParselyPageValidator(SchemaValidator):
def __init__(self, graph, doc_lines, url=""):
super(ParselyPageValidator, self).__init__(graph, doc_lines, url=url)
self.text = '\n'.join([a[0] for a in doc_lines])
self.stdref = self.get_standard()
self.url_validator = HttpUrl()
self.data = self._get_parselypage(self.text)
def get_standard(self):
"""get list of allowed parameters"""
try:
res = urlopen(PARSELY_PAGE_SCHEMA)
except:
return []
text = res.read()
if isinstance(text, bytes):
text = text.decode('utf-8')
tree = etree.parse(StringIO(text))
stdref = tree.xpath("//div/@about")
return [a.split(':')[1] for a in stdref]
def validate(self):
result = ValidationResult("parsely-page", self.__class__.__name__)
if self.data:
for key in self.data:
res = self.check_key(key)
if res:
result.add_error(res)
return result
def check_key(self, key):
if key not in self.stdref:
err = _error("{0} - invalid parsely-page field", key,
doc_lines=self.doc_lines)
return ValidationWarning(
ValidationResult.ERROR, err['err'], err['line'], err['num'])
if key in ["link", "image_url"]:
if not self.url_validator(self.data[key]):
err = _error(
"{0} - invalid url for field '{1}'", self.data[key], key,
doc_lines=self.doc_lines)
return ValidationWarning(
ValidationResult.ERROR, err['err'], err['line'],
err['num'])
return None
|
Parsely/schemato | schemato/schemadef.py | SchemaDef._read_schema | python | def _read_schema(self):
cache_filename = os.path.join(
CACHE_ROOT, "%s.smt" % self._representation)
log.info("Attempting to read local schema at %s" % cache_filename)
try:
if time.time() - os.stat(cache_filename).st_mtime > CACHE_EXPIRY:
log.warning("Cache expired, re-pulling")
self._pull_schema_definition(cache_filename)
except OSError:
log.warning("Local schema not found. Pulling from web.")
self._pull_schema_definition(cache_filename)
else:
log.info("Success")
return cache_filename | return the local filename of the definition file for this schema
if not present or older than expiry, pull the latest version from
the web at self._ontology_file | train | https://github.com/Parsely/schemato/blob/7002316fbcd52f2e669f8372bf1338c572e3df4b/schemato/schemadef.py#L38-L55 | null | class SchemaDef(object):
"""class to handle the loading and caching of a standard
ASSUMPTIONS:
This class holds the definition of a standard
It does not perform any validation against that standard
This class knows nothing about the input file being validated
"""
errorstring_base = \
"Are you calling parse_ontology from the base SchemaDef class?"
def __init__(self):
super(SchemaDef, self).__init__()
self._ontology_file = ""
self.ontology = defaultdict(list)
self.attributes_by_class = {}
self._ontology_parser_function = None
self.lexicon = {}
def _pull_schema_definition(self, fname):
"""download an ontology definition from the web"""
std_url = urlopen(self._ontology_file)
cached_std = open(fname, "w+")
cached_std.write(std_url.read())
cached_std.close()
def parse_ontology(self):
"""place the ontology graph into a set of custom data structures
for use by the validator"""
start = time.clock()
log.info("Parsing ontology file for %s" % self.__class__.__name__)
for subj, pred, obj in self._schema_nodes():
if subj not in self.attributes_by_class:
if obj == rt.URIRef(self.lexicon['class']) and \
pred == rt.URIRef(self.lexicon['type']):
self.attributes_by_class[subj] = []
leaves = [(subj, pred, obj)]
if type(obj) == rt.BNode:
leaves = deepest_node((subj, pred, obj), self.graph)
for s, p, o in leaves:
if o not in self.attributes_by_class:
self.attributes_by_class[o] = []
if pred == rt.URIRef(self.lexicon['domain']):
self.attributes_by_class[o].append(subj)
if not self.attributes_by_class:
log.info("No nodes found in ontology")
log.info("Ontology parsing complete in {}".format(
(time.clock() - start) * 1000))
def _schema_nodes(self):
"""parse self._ontology_file into a graph"""
name, ext = os.path.splitext(self._ontology_file)
if ext in ['.ttl']:
self._ontology_parser_function = \
lambda s: rdflib.Graph().parse(s, format='n3')
else:
self._ontology_parser_function = \
lambda s: pyRdfa().graph_from_source(s)
if not self._ontology_parser_function:
raise ValueError(
"No function found to parse ontology. %s" %
self.errorstring_base)
if not self._ontology_file:
raise ValueError(
"No ontology file specified. %s" % self.errorstring_base)
if not self.lexicon:
raise ValueError(
"No lexicon object assigned. %s" % self.errorstring_base)
latest_file = self._read_schema()
try:
self.graph = self._ontology_parser_function(latest_file)
except:
raise IOError("Error parsing ontology at %s" % latest_file)
for subj, pred, obj in self.graph:
self.ontology[subj].append((pred, obj))
yield (subj, pred, obj)
|
Parsely/schemato | schemato/schemadef.py | SchemaDef._pull_schema_definition | python | def _pull_schema_definition(self, fname):
std_url = urlopen(self._ontology_file)
cached_std = open(fname, "w+")
cached_std.write(std_url.read())
cached_std.close() | download an ontology definition from the web | train | https://github.com/Parsely/schemato/blob/7002316fbcd52f2e669f8372bf1338c572e3df4b/schemato/schemadef.py#L57-L62 | null | class SchemaDef(object):
"""class to handle the loading and caching of a standard
ASSUMPTIONS:
This class holds the definition of a standard
It does not perform any validation against that standard
This class knows nothing about the input file being validated
"""
errorstring_base = \
"Are you calling parse_ontology from the base SchemaDef class?"
def __init__(self):
super(SchemaDef, self).__init__()
self._ontology_file = ""
self.ontology = defaultdict(list)
self.attributes_by_class = {}
self._ontology_parser_function = None
self.lexicon = {}
def _read_schema(self):
"""return the local filename of the definition file for this schema
if not present or older than expiry, pull the latest version from
the web at self._ontology_file"""
cache_filename = os.path.join(
CACHE_ROOT, "%s.smt" % self._representation)
log.info("Attempting to read local schema at %s" % cache_filename)
try:
if time.time() - os.stat(cache_filename).st_mtime > CACHE_EXPIRY:
log.warning("Cache expired, re-pulling")
self._pull_schema_definition(cache_filename)
except OSError:
log.warning("Local schema not found. Pulling from web.")
self._pull_schema_definition(cache_filename)
else:
log.info("Success")
return cache_filename
def parse_ontology(self):
"""place the ontology graph into a set of custom data structures
for use by the validator"""
start = time.clock()
log.info("Parsing ontology file for %s" % self.__class__.__name__)
for subj, pred, obj in self._schema_nodes():
if subj not in self.attributes_by_class:
if obj == rt.URIRef(self.lexicon['class']) and \
pred == rt.URIRef(self.lexicon['type']):
self.attributes_by_class[subj] = []
leaves = [(subj, pred, obj)]
if type(obj) == rt.BNode:
leaves = deepest_node((subj, pred, obj), self.graph)
for s, p, o in leaves:
if o not in self.attributes_by_class:
self.attributes_by_class[o] = []
if pred == rt.URIRef(self.lexicon['domain']):
self.attributes_by_class[o].append(subj)
if not self.attributes_by_class:
log.info("No nodes found in ontology")
log.info("Ontology parsing complete in {}".format(
(time.clock() - start) * 1000))
def _schema_nodes(self):
"""parse self._ontology_file into a graph"""
name, ext = os.path.splitext(self._ontology_file)
if ext in ['.ttl']:
self._ontology_parser_function = \
lambda s: rdflib.Graph().parse(s, format='n3')
else:
self._ontology_parser_function = \
lambda s: pyRdfa().graph_from_source(s)
if not self._ontology_parser_function:
raise ValueError(
"No function found to parse ontology. %s" %
self.errorstring_base)
if not self._ontology_file:
raise ValueError(
"No ontology file specified. %s" % self.errorstring_base)
if not self.lexicon:
raise ValueError(
"No lexicon object assigned. %s" % self.errorstring_base)
latest_file = self._read_schema()
try:
self.graph = self._ontology_parser_function(latest_file)
except:
raise IOError("Error parsing ontology at %s" % latest_file)
for subj, pred, obj in self.graph:
self.ontology[subj].append((pred, obj))
yield (subj, pred, obj)
|
Parsely/schemato | schemato/schemadef.py | SchemaDef.parse_ontology | python | def parse_ontology(self):
start = time.clock()
log.info("Parsing ontology file for %s" % self.__class__.__name__)
for subj, pred, obj in self._schema_nodes():
if subj not in self.attributes_by_class:
if obj == rt.URIRef(self.lexicon['class']) and \
pred == rt.URIRef(self.lexicon['type']):
self.attributes_by_class[subj] = []
leaves = [(subj, pred, obj)]
if type(obj) == rt.BNode:
leaves = deepest_node((subj, pred, obj), self.graph)
for s, p, o in leaves:
if o not in self.attributes_by_class:
self.attributes_by_class[o] = []
if pred == rt.URIRef(self.lexicon['domain']):
self.attributes_by_class[o].append(subj)
if not self.attributes_by_class:
log.info("No nodes found in ontology")
log.info("Ontology parsing complete in {}".format(
(time.clock() - start) * 1000)) | place the ontology graph into a set of custom data structures
for use by the validator | train | https://github.com/Parsely/schemato/blob/7002316fbcd52f2e669f8372bf1338c572e3df4b/schemato/schemadef.py#L64-L87 | [
"def deepest_node((subj, pred, obj), graph):\n \"\"\"recurse down the tree and return a list of the most deeply nested\n child nodes of the given triple\"\"\"\n # i don't fully accept the premise that this docstring presents\n # i'm not a docstring literalist\n to_return = []\n\n def _deepest_node((subj, pred, obj), graph):\n children = []\n if isinstance(obj, rt.BNode):\n for s, p, o in graph:\n if str(s) == str(obj):\n children.append((s, p, o))\n for s, p, o in children:\n s1, p1, o1 = _deepest_node((s, p, o), graph)\n # coupling *smacks hand with ruler*\n if \"rNews\" in str(o1) and (s1, p1, o1) not in to_return:\n to_return.append((s1, p1, o1))\n return (s1, p1, o1)\n else:\n return (subj, pred, obj)\n _deepest_node((subj, pred, obj), graph)\n\n return to_return\n",
"def _schema_nodes(self):\n \"\"\"parse self._ontology_file into a graph\"\"\"\n name, ext = os.path.splitext(self._ontology_file)\n if ext in ['.ttl']:\n self._ontology_parser_function = \\\n lambda s: rdflib.Graph().parse(s, format='n3')\n else:\n self._ontology_parser_function = \\\n lambda s: pyRdfa().graph_from_source(s)\n if not self._ontology_parser_function:\n raise ValueError(\n \"No function found to parse ontology. %s\" %\n self.errorstring_base)\n if not self._ontology_file:\n raise ValueError(\n \"No ontology file specified. %s\" % self.errorstring_base)\n if not self.lexicon:\n raise ValueError(\n \"No lexicon object assigned. %s\" % self.errorstring_base)\n\n latest_file = self._read_schema()\n\n try:\n self.graph = self._ontology_parser_function(latest_file)\n except:\n raise IOError(\"Error parsing ontology at %s\" % latest_file)\n\n for subj, pred, obj in self.graph:\n self.ontology[subj].append((pred, obj))\n yield (subj, pred, obj)\n"
] | class SchemaDef(object):
"""class to handle the loading and caching of a standard
ASSUMPTIONS:
This class holds the definition of a standard
It does not perform any validation against that standard
This class knows nothing about the input file being validated
"""
errorstring_base = \
"Are you calling parse_ontology from the base SchemaDef class?"
def __init__(self):
super(SchemaDef, self).__init__()
self._ontology_file = ""
self.ontology = defaultdict(list)
self.attributes_by_class = {}
self._ontology_parser_function = None
self.lexicon = {}
def _read_schema(self):
"""return the local filename of the definition file for this schema
if not present or older than expiry, pull the latest version from
the web at self._ontology_file"""
cache_filename = os.path.join(
CACHE_ROOT, "%s.smt" % self._representation)
log.info("Attempting to read local schema at %s" % cache_filename)
try:
if time.time() - os.stat(cache_filename).st_mtime > CACHE_EXPIRY:
log.warning("Cache expired, re-pulling")
self._pull_schema_definition(cache_filename)
except OSError:
log.warning("Local schema not found. Pulling from web.")
self._pull_schema_definition(cache_filename)
else:
log.info("Success")
return cache_filename
def _pull_schema_definition(self, fname):
"""download an ontology definition from the web"""
std_url = urlopen(self._ontology_file)
cached_std = open(fname, "w+")
cached_std.write(std_url.read())
cached_std.close()
def _schema_nodes(self):
"""parse self._ontology_file into a graph"""
name, ext = os.path.splitext(self._ontology_file)
if ext in ['.ttl']:
self._ontology_parser_function = \
lambda s: rdflib.Graph().parse(s, format='n3')
else:
self._ontology_parser_function = \
lambda s: pyRdfa().graph_from_source(s)
if not self._ontology_parser_function:
raise ValueError(
"No function found to parse ontology. %s" %
self.errorstring_base)
if not self._ontology_file:
raise ValueError(
"No ontology file specified. %s" % self.errorstring_base)
if not self.lexicon:
raise ValueError(
"No lexicon object assigned. %s" % self.errorstring_base)
latest_file = self._read_schema()
try:
self.graph = self._ontology_parser_function(latest_file)
except:
raise IOError("Error parsing ontology at %s" % latest_file)
for subj, pred, obj in self.graph:
self.ontology[subj].append((pred, obj))
yield (subj, pred, obj)
|
Parsely/schemato | schemato/schemadef.py | SchemaDef._schema_nodes | python | def _schema_nodes(self):
name, ext = os.path.splitext(self._ontology_file)
if ext in ['.ttl']:
self._ontology_parser_function = \
lambda s: rdflib.Graph().parse(s, format='n3')
else:
self._ontology_parser_function = \
lambda s: pyRdfa().graph_from_source(s)
if not self._ontology_parser_function:
raise ValueError(
"No function found to parse ontology. %s" %
self.errorstring_base)
if not self._ontology_file:
raise ValueError(
"No ontology file specified. %s" % self.errorstring_base)
if not self.lexicon:
raise ValueError(
"No lexicon object assigned. %s" % self.errorstring_base)
latest_file = self._read_schema()
try:
self.graph = self._ontology_parser_function(latest_file)
except:
raise IOError("Error parsing ontology at %s" % latest_file)
for subj, pred, obj in self.graph:
self.ontology[subj].append((pred, obj))
yield (subj, pred, obj) | parse self._ontology_file into a graph | train | https://github.com/Parsely/schemato/blob/7002316fbcd52f2e669f8372bf1338c572e3df4b/schemato/schemadef.py#L89-L118 | [
"def _read_schema(self):\n \"\"\"return the local filename of the definition file for this schema\n if not present or older than expiry, pull the latest version from\n the web at self._ontology_file\"\"\"\n cache_filename = os.path.join(\n CACHE_ROOT, \"%s.smt\" % self._representation)\n log.info(\"Attempting to read local schema at %s\" % cache_filename)\n try:\n if time.time() - os.stat(cache_filename).st_mtime > CACHE_EXPIRY:\n log.warning(\"Cache expired, re-pulling\")\n self._pull_schema_definition(cache_filename)\n except OSError:\n log.warning(\"Local schema not found. Pulling from web.\")\n self._pull_schema_definition(cache_filename)\n else:\n log.info(\"Success\")\n\n return cache_filename\n"
] | class SchemaDef(object):
"""class to handle the loading and caching of a standard
ASSUMPTIONS:
This class holds the definition of a standard
It does not perform any validation against that standard
This class knows nothing about the input file being validated
"""
errorstring_base = \
"Are you calling parse_ontology from the base SchemaDef class?"
def __init__(self):
super(SchemaDef, self).__init__()
self._ontology_file = ""
self.ontology = defaultdict(list)
self.attributes_by_class = {}
self._ontology_parser_function = None
self.lexicon = {}
def _read_schema(self):
"""return the local filename of the definition file for this schema
if not present or older than expiry, pull the latest version from
the web at self._ontology_file"""
cache_filename = os.path.join(
CACHE_ROOT, "%s.smt" % self._representation)
log.info("Attempting to read local schema at %s" % cache_filename)
try:
if time.time() - os.stat(cache_filename).st_mtime > CACHE_EXPIRY:
log.warning("Cache expired, re-pulling")
self._pull_schema_definition(cache_filename)
except OSError:
log.warning("Local schema not found. Pulling from web.")
self._pull_schema_definition(cache_filename)
else:
log.info("Success")
return cache_filename
def _pull_schema_definition(self, fname):
"""download an ontology definition from the web"""
std_url = urlopen(self._ontology_file)
cached_std = open(fname, "w+")
cached_std.write(std_url.read())
cached_std.close()
def parse_ontology(self):
"""place the ontology graph into a set of custom data structures
for use by the validator"""
start = time.clock()
log.info("Parsing ontology file for %s" % self.__class__.__name__)
for subj, pred, obj in self._schema_nodes():
if subj not in self.attributes_by_class:
if obj == rt.URIRef(self.lexicon['class']) and \
pred == rt.URIRef(self.lexicon['type']):
self.attributes_by_class[subj] = []
leaves = [(subj, pred, obj)]
if type(obj) == rt.BNode:
leaves = deepest_node((subj, pred, obj), self.graph)
for s, p, o in leaves:
if o not in self.attributes_by_class:
self.attributes_by_class[o] = []
if pred == rt.URIRef(self.lexicon['domain']):
self.attributes_by_class[o].append(subj)
if not self.attributes_by_class:
log.info("No nodes found in ontology")
log.info("Ontology parsing complete in {}".format(
(time.clock() - start) * 1000))
|
Parsely/schemato | schemato/utils.py | deepest_node | python | def deepest_node((subj, pred, obj), graph):
# i don't fully accept the premise that this docstring presents
# i'm not a docstring literalist
to_return = []
def _deepest_node((subj, pred, obj), graph):
children = []
if isinstance(obj, rt.BNode):
for s, p, o in graph:
if str(s) == str(obj):
children.append((s, p, o))
for s, p, o in children:
s1, p1, o1 = _deepest_node((s, p, o), graph)
# coupling *smacks hand with ruler*
if "rNews" in str(o1) and (s1, p1, o1) not in to_return:
to_return.append((s1, p1, o1))
return (s1, p1, o1)
else:
return (subj, pred, obj)
_deepest_node((subj, pred, obj), graph)
return to_return | recurse down the tree and return a list of the most deeply nested
child nodes of the given triple | train | https://github.com/Parsely/schemato/blob/7002316fbcd52f2e669f8372bf1338c572e3df4b/schemato/utils.py#L4-L27 | [
"def _deepest_node((subj, pred, obj), graph):\n children = []\n if isinstance(obj, rt.BNode):\n for s, p, o in graph:\n if str(s) == str(obj):\n children.append((s, p, o))\n for s, p, o in children:\n s1, p1, o1 = _deepest_node((s, p, o), graph)\n # coupling *smacks hand with ruler*\n if \"rNews\" in str(o1) and (s1, p1, o1) not in to_return:\n to_return.append((s1, p1, o1))\n return (s1, p1, o1)\n else:\n return (subj, pred, obj)\n"
] | import rdflib.term as rt
|
Parsely/schemato | schemato/schemato.py | Schemato._document_lines | python | def _document_lines(self, text):
inlines = text.split('\n')
doc_lines = [(re.sub(r'^ +| +$', '', line), num)
for line, num
in zip(inlines, xrange(1, len(inlines) + 1))]
return doc_lines | helper, get a list of (linetext, linenum) from a string with
newlines | train | https://github.com/Parsely/schemato/blob/7002316fbcd52f2e669f8372bf1338c572e3df4b/schemato/schemato.py#L64-L72 | null | class Schemato(object):
def __init__(self, source, url=None, loglevel="ERROR"):
"""init with a local filepath or a URI"""
super(Schemato, self).__init__()
self.set_loglevel(loglevel)
graph_source = source
if url is not None:
graph_source = StringIO(source)
self.graph = CompoundGraph(graph_source)
def _read_stream(source):
text, url = self._get_document(source)
return url, self._document_lines(text)
parsed_url, self.doc_lines = _read_stream(source)
self.url = url
if url is None:
self.url = parsed_url
validator = ParselyPageValidator(self.graph, self.doc_lines)
self.parsely_page = validator.data
def validate(self):
self._load_validators()
results = [v.validate() for v in self.validators]
log.info("returned from validate() : %s", results)
for res in results:
log.info(res.to_json())
return results
def set_loglevel(self, loglevel):
if hasattr(logging, loglevel):
log.setLevel(loglevel)
else:
log.setLevel(logging.ERROR)
log.error(
"Unrecognized loglevel %s, defaulting to ERROR", loglevel)
def _load_validators(self):
self.validators = set()
for entry_point in iter_entry_points('schemato_validators'):
validator_fn = entry_point.load()
validator = validator_fn(self.graph, self.doc_lines, url=self.url)
self.validators.add(validator)
def _get_document(self, source):
"""helper, open a file or url and return the content and identifier"""
scheme_url = source
if not source.startswith("http"):
scheme_url = "http://%s" % source
text = source
try:
text = urllib.urlopen(scheme_url).read()
except:
pass
else:
return (text, scheme_url)
try:
text = open(source, "r").read()
except:
pass
else:
return (text, source)
return (text, None)
|
Parsely/schemato | schemato/schemato.py | Schemato._get_document | python | def _get_document(self, source):
scheme_url = source
if not source.startswith("http"):
scheme_url = "http://%s" % source
text = source
try:
text = urllib.urlopen(scheme_url).read()
except:
pass
else:
return (text, scheme_url)
try:
text = open(source, "r").read()
except:
pass
else:
return (text, source)
return (text, None) | helper, open a file or url and return the content and identifier | train | https://github.com/Parsely/schemato/blob/7002316fbcd52f2e669f8372bf1338c572e3df4b/schemato/schemato.py#L74-L96 | null | class Schemato(object):
def __init__(self, source, url=None, loglevel="ERROR"):
"""init with a local filepath or a URI"""
super(Schemato, self).__init__()
self.set_loglevel(loglevel)
graph_source = source
if url is not None:
graph_source = StringIO(source)
self.graph = CompoundGraph(graph_source)
def _read_stream(source):
text, url = self._get_document(source)
return url, self._document_lines(text)
parsed_url, self.doc_lines = _read_stream(source)
self.url = url
if url is None:
self.url = parsed_url
validator = ParselyPageValidator(self.graph, self.doc_lines)
self.parsely_page = validator.data
def validate(self):
self._load_validators()
results = [v.validate() for v in self.validators]
log.info("returned from validate() : %s", results)
for res in results:
log.info(res.to_json())
return results
def set_loglevel(self, loglevel):
if hasattr(logging, loglevel):
log.setLevel(loglevel)
else:
log.setLevel(logging.ERROR)
log.error(
"Unrecognized loglevel %s, defaulting to ERROR", loglevel)
def _load_validators(self):
self.validators = set()
for entry_point in iter_entry_points('schemato_validators'):
validator_fn = entry_point.load()
validator = validator_fn(self.graph, self.doc_lines, url=self.url)
self.validators.add(validator)
def _document_lines(self, text):
"""helper, get a list of (linetext, linenum) from a string with
newlines
"""
inlines = text.split('\n')
doc_lines = [(re.sub(r'^ +| +$', '', line), num)
for line, num
in zip(inlines, xrange(1, len(inlines) + 1))]
return doc_lines
|
rycus86/ghost-client | ghost_client/models.py | ModelList.get_page | python | def get_page(self, page_number):
if page_number:
kwargs = dict(self._list_kwargs)
kwargs['limit'] = self.limit
kwargs['page'] = page_number
return self._controller.list(**kwargs) | :param page_number: The page number to fetch (1-indexed)
:return: The requested page fetched from the API for the query | train | https://github.com/rycus86/ghost-client/blob/863d332801d2c1b8e7ad4573c7b16db78a7f8c8d/ghost_client/models.py#L109-L120 | null | class ModelList(list):
"""
Wrapper around lists returned by the API.
Exposes methods related to pagination and
wraps each item in their respective model type.
"""
def __init__(self, data, type_name, controller, list_kwargs, model_type=Model):
"""
Enhances a regular list.
:param data: The original iterable
:param type_name: The name of the type as the API knows it
:param controller: The controller that returned the list
:param list_kwargs: Parameters to use when fetching pages from the API
:param model_type: The model type of the items
"""
super(ModelList, self).__init__(map(model_type, data[type_name]))
self.meta = data['meta']['pagination']
self._controller = controller
self._list_kwargs = list_kwargs
@property
def total(self):
"""
:return: The total number of results available for the query
"""
return self.meta['total']
@property
def pages(self):
"""
:return: The number of pages available for the query
"""
return self.meta['pages']
@property
def limit(self):
"""
:return: The limit used for queries
"""
return self.meta['limit']
def next_page(self):
"""
:return: The next page fetched from the API for the query
"""
return self.get_page(self.meta['next'])
def prev_page(self):
"""
:return: The previous page fetched from the API for the query
"""
return self.get_page(self.meta['prev'])
|
rycus86/ghost-client | ghost_client/models.py | Controller.list | python | def list(self, **kwargs):
return ModelList(
self.ghost.execute_get('%s/' % self._type_name, **kwargs),
self._type_name, self, kwargs, model_type=self._model_type
) | Fetch a list of resources from the API.
:param kwargs: Parameters for the request
(see from and below https://api.ghost.org/docs/limit)
:return: The list of items returned by the API
wrapped as `Model` objects with pagination by `ModelList` | train | https://github.com/rycus86/ghost-client/blob/863d332801d2c1b8e7ad4573c7b16db78a7f8c8d/ghost_client/models.py#L141-L154 | null | class Controller(object):
"""
The API controller dealing with requests for a specific type.
"""
def __init__(self, ghost, type_name, model_type=Model):
"""
Initializes a new controller.
:param ghost: An instance of the API client
:param type_name: The type name as the API knows it
:param model_type: The model type to wrap response items as
"""
self.ghost = ghost
self._type_name = type_name
self._model_type = model_type
def get(self, id=None, slug=None, **kwargs):
"""
Fetch a resource from the API.
Either the `id` or the `slug` has to be present.
:param id: The ID of the resource
:param slug: The slug of the resource
:param kwargs: Parameters for the request
(see from and below https://api.ghost.org/docs/limit)
:return: The item returned by the API
wrapped as a `Model` object
"""
if id:
items = self.ghost.execute_get('%s/%s/' % (self._type_name, id), **kwargs)
elif slug:
items = self.ghost.execute_get('%s/slug/%s/' % (self._type_name, slug), **kwargs)
else:
raise GhostException(
500, 'Either the ID or the Slug of the resource needs to be specified'
)
return self._model_type(items[self._type_name][0])
def create(self, **kwargs):
"""
Creates a new resource.
:param kwargs: The properties of the resource
:return: The created item returned by the API
wrapped as a `Model` object
"""
response = self.ghost.execute_post('%s/' % self._type_name, json={
self._type_name: [
kwargs
]
})
return self._model_type(response.get(self._type_name)[0])
def update(self, id, **kwargs):
"""
Updates an existing resource.
:param id: The ID of the resource
:param kwargs: The properties of the resource to change
:return: The updated item returned by the API
wrapped as a `Model` object
"""
response = self.ghost.execute_put('%s/%s/' % (self._type_name, id), json={
self._type_name: [
kwargs
]
})
return self._model_type(response.get(self._type_name)[0])
def delete(self, id):
"""
Deletes an existing resource.
Does not return anything but raises an exception when failed.
:param id: The ID of the resource
"""
self.ghost.execute_delete('%s/%s/' % (self._type_name, id))
|
rycus86/ghost-client | ghost_client/models.py | Controller.get | python | def get(self, id=None, slug=None, **kwargs):
if id:
items = self.ghost.execute_get('%s/%s/' % (self._type_name, id), **kwargs)
elif slug:
items = self.ghost.execute_get('%s/slug/%s/' % (self._type_name, slug), **kwargs)
else:
raise GhostException(
500, 'Either the ID or the Slug of the resource needs to be specified'
)
return self._model_type(items[self._type_name][0]) | Fetch a resource from the API.
Either the `id` or the `slug` has to be present.
:param id: The ID of the resource
:param slug: The slug of the resource
:param kwargs: Parameters for the request
(see from and below https://api.ghost.org/docs/limit)
:return: The item returned by the API
wrapped as a `Model` object | train | https://github.com/rycus86/ghost-client/blob/863d332801d2c1b8e7ad4573c7b16db78a7f8c8d/ghost_client/models.py#L156-L180 | null | class Controller(object):
"""
The API controller dealing with requests for a specific type.
"""
def __init__(self, ghost, type_name, model_type=Model):
"""
Initializes a new controller.
:param ghost: An instance of the API client
:param type_name: The type name as the API knows it
:param model_type: The model type to wrap response items as
"""
self.ghost = ghost
self._type_name = type_name
self._model_type = model_type
def list(self, **kwargs):
"""
Fetch a list of resources from the API.
:param kwargs: Parameters for the request
(see from and below https://api.ghost.org/docs/limit)
:return: The list of items returned by the API
wrapped as `Model` objects with pagination by `ModelList`
"""
return ModelList(
self.ghost.execute_get('%s/' % self._type_name, **kwargs),
self._type_name, self, kwargs, model_type=self._model_type
)
def create(self, **kwargs):
"""
Creates a new resource.
:param kwargs: The properties of the resource
:return: The created item returned by the API
wrapped as a `Model` object
"""
response = self.ghost.execute_post('%s/' % self._type_name, json={
self._type_name: [
kwargs
]
})
return self._model_type(response.get(self._type_name)[0])
def update(self, id, **kwargs):
"""
Updates an existing resource.
:param id: The ID of the resource
:param kwargs: The properties of the resource to change
:return: The updated item returned by the API
wrapped as a `Model` object
"""
response = self.ghost.execute_put('%s/%s/' % (self._type_name, id), json={
self._type_name: [
kwargs
]
})
return self._model_type(response.get(self._type_name)[0])
def delete(self, id):
"""
Deletes an existing resource.
Does not return anything but raises an exception when failed.
:param id: The ID of the resource
"""
self.ghost.execute_delete('%s/%s/' % (self._type_name, id))
|
rycus86/ghost-client | ghost_client/models.py | Controller.create | python | def create(self, **kwargs):
response = self.ghost.execute_post('%s/' % self._type_name, json={
self._type_name: [
kwargs
]
})
return self._model_type(response.get(self._type_name)[0]) | Creates a new resource.
:param kwargs: The properties of the resource
:return: The created item returned by the API
wrapped as a `Model` object | train | https://github.com/rycus86/ghost-client/blob/863d332801d2c1b8e7ad4573c7b16db78a7f8c8d/ghost_client/models.py#L182-L197 | null | class Controller(object):
"""
The API controller dealing with requests for a specific type.
"""
def __init__(self, ghost, type_name, model_type=Model):
"""
Initializes a new controller.
:param ghost: An instance of the API client
:param type_name: The type name as the API knows it
:param model_type: The model type to wrap response items as
"""
self.ghost = ghost
self._type_name = type_name
self._model_type = model_type
def list(self, **kwargs):
"""
Fetch a list of resources from the API.
:param kwargs: Parameters for the request
(see from and below https://api.ghost.org/docs/limit)
:return: The list of items returned by the API
wrapped as `Model` objects with pagination by `ModelList`
"""
return ModelList(
self.ghost.execute_get('%s/' % self._type_name, **kwargs),
self._type_name, self, kwargs, model_type=self._model_type
)
def get(self, id=None, slug=None, **kwargs):
"""
Fetch a resource from the API.
Either the `id` or the `slug` has to be present.
:param id: The ID of the resource
:param slug: The slug of the resource
:param kwargs: Parameters for the request
(see from and below https://api.ghost.org/docs/limit)
:return: The item returned by the API
wrapped as a `Model` object
"""
if id:
items = self.ghost.execute_get('%s/%s/' % (self._type_name, id), **kwargs)
elif slug:
items = self.ghost.execute_get('%s/slug/%s/' % (self._type_name, slug), **kwargs)
else:
raise GhostException(
500, 'Either the ID or the Slug of the resource needs to be specified'
)
return self._model_type(items[self._type_name][0])
def update(self, id, **kwargs):
"""
Updates an existing resource.
:param id: The ID of the resource
:param kwargs: The properties of the resource to change
:return: The updated item returned by the API
wrapped as a `Model` object
"""
response = self.ghost.execute_put('%s/%s/' % (self._type_name, id), json={
self._type_name: [
kwargs
]
})
return self._model_type(response.get(self._type_name)[0])
def delete(self, id):
"""
Deletes an existing resource.
Does not return anything but raises an exception when failed.
:param id: The ID of the resource
"""
self.ghost.execute_delete('%s/%s/' % (self._type_name, id))
|
rycus86/ghost-client | ghost_client/models.py | Controller.update | python | def update(self, id, **kwargs):
response = self.ghost.execute_put('%s/%s/' % (self._type_name, id), json={
self._type_name: [
kwargs
]
})
return self._model_type(response.get(self._type_name)[0]) | Updates an existing resource.
:param id: The ID of the resource
:param kwargs: The properties of the resource to change
:return: The updated item returned by the API
wrapped as a `Model` object | train | https://github.com/rycus86/ghost-client/blob/863d332801d2c1b8e7ad4573c7b16db78a7f8c8d/ghost_client/models.py#L199-L215 | null | class Controller(object):
"""
The API controller dealing with requests for a specific type.
"""
def __init__(self, ghost, type_name, model_type=Model):
"""
Initializes a new controller.
:param ghost: An instance of the API client
:param type_name: The type name as the API knows it
:param model_type: The model type to wrap response items as
"""
self.ghost = ghost
self._type_name = type_name
self._model_type = model_type
def list(self, **kwargs):
"""
Fetch a list of resources from the API.
:param kwargs: Parameters for the request
(see from and below https://api.ghost.org/docs/limit)
:return: The list of items returned by the API
wrapped as `Model` objects with pagination by `ModelList`
"""
return ModelList(
self.ghost.execute_get('%s/' % self._type_name, **kwargs),
self._type_name, self, kwargs, model_type=self._model_type
)
def get(self, id=None, slug=None, **kwargs):
"""
Fetch a resource from the API.
Either the `id` or the `slug` has to be present.
:param id: The ID of the resource
:param slug: The slug of the resource
:param kwargs: Parameters for the request
(see from and below https://api.ghost.org/docs/limit)
:return: The item returned by the API
wrapped as a `Model` object
"""
if id:
items = self.ghost.execute_get('%s/%s/' % (self._type_name, id), **kwargs)
elif slug:
items = self.ghost.execute_get('%s/slug/%s/' % (self._type_name, slug), **kwargs)
else:
raise GhostException(
500, 'Either the ID or the Slug of the resource needs to be specified'
)
return self._model_type(items[self._type_name][0])
def create(self, **kwargs):
"""
Creates a new resource.
:param kwargs: The properties of the resource
:return: The created item returned by the API
wrapped as a `Model` object
"""
response = self.ghost.execute_post('%s/' % self._type_name, json={
self._type_name: [
kwargs
]
})
return self._model_type(response.get(self._type_name)[0])
def delete(self, id):
"""
Deletes an existing resource.
Does not return anything but raises an exception when failed.
:param id: The ID of the resource
"""
self.ghost.execute_delete('%s/%s/' % (self._type_name, id))
|
rycus86/ghost-client | ghost_client/models.py | PostController.create | python | def create(self, **kwargs):
return super(PostController, self).create(**self._with_markdown(kwargs)) | Creates a new post.
When the `markdown` property is present, it will be
automatically converted to `mobiledoc` on v1.+ of the server.
:param kwargs: The properties of the post
:return: The created `Post` object | train | https://github.com/rycus86/ghost-client/blob/863d332801d2c1b8e7ad4573c7b16db78a7f8c8d/ghost_client/models.py#L242-L252 | [
"def create(self, **kwargs):\n \"\"\"\n Creates a new resource.\n\n :param kwargs: The properties of the resource\n :return: The created item returned by the API\n wrapped as a `Model` object\n \"\"\"\n\n response = self.ghost.execute_post('%s/' % self._type_name, json={\n self._type_name: [\n kwargs\n ]\n })\n\n return self._model_type(response.get(self._type_name)[0])\n",
"def _with_markdown(self, kwargs):\n markdown = kwargs.pop('markdown', None)\n\n if markdown:\n if self.ghost.version.startswith('0'):\n # put it back as is for version 0.x\n kwargs['markdown'] = markdown\n\n else:\n updated = dict(kwargs)\n updated['mobiledoc'] = json.dumps({\n \"version\": \"0.3.1\", \"markups\": [], \"atoms\": [],\n \"cards\": [[\"card-markdown\", {\"cardName\": \"card-markdown\", \"markdown\": markdown}]],\n \"sections\": [[10, 0]]})\n return updated\n\n return kwargs\n"
] | class PostController(Controller):
"""
Controller extension for managing posts.
"""
def __init__(self, ghost):
"""
Initialize a new controller for posts.
:param ghost: An instance of the API client
"""
super(PostController, self).__init__(ghost, 'posts', model_type=Post)
def update(self, id, **kwargs):
"""
Updates an existing post.
When the `markdown` property is present, it will be
automatically converted to `mobiledoc` on v1.+ of the server.
:param id: The ID of the existing post
:param kwargs: The properties of the post to change
:return: The updated `Post` object
"""
return super(PostController, self).update(id, **self._with_markdown(kwargs))
def _with_markdown(self, kwargs):
markdown = kwargs.pop('markdown', None)
if markdown:
if self.ghost.version.startswith('0'):
# put it back as is for version 0.x
kwargs['markdown'] = markdown
else:
updated = dict(kwargs)
updated['mobiledoc'] = json.dumps({
"version": "0.3.1", "markups": [], "atoms": [],
"cards": [["card-markdown", {"cardName": "card-markdown", "markdown": markdown}]],
"sections": [[10, 0]]})
return updated
return kwargs
|
rycus86/ghost-client | ghost_client/models.py | PostController.update | python | def update(self, id, **kwargs):
return super(PostController, self).update(id, **self._with_markdown(kwargs)) | Updates an existing post.
When the `markdown` property is present, it will be
automatically converted to `mobiledoc` on v1.+ of the server.
:param id: The ID of the existing post
:param kwargs: The properties of the post to change
:return: The updated `Post` object | train | https://github.com/rycus86/ghost-client/blob/863d332801d2c1b8e7ad4573c7b16db78a7f8c8d/ghost_client/models.py#L254-L265 | [
"def update(self, id, **kwargs):\n \"\"\"\n Updates an existing resource.\n\n :param id: The ID of the resource\n :param kwargs: The properties of the resource to change\n :return: The updated item returned by the API\n wrapped as a `Model` object\n \"\"\"\n\n response = self.ghost.execute_put('%s/%s/' % (self._type_name, id), json={\n self._type_name: [\n kwargs\n ]\n })\n\n return self._model_type(response.get(self._type_name)[0])\n",
"def _with_markdown(self, kwargs):\n markdown = kwargs.pop('markdown', None)\n\n if markdown:\n if self.ghost.version.startswith('0'):\n # put it back as is for version 0.x\n kwargs['markdown'] = markdown\n\n else:\n updated = dict(kwargs)\n updated['mobiledoc'] = json.dumps({\n \"version\": \"0.3.1\", \"markups\": [], \"atoms\": [],\n \"cards\": [[\"card-markdown\", {\"cardName\": \"card-markdown\", \"markdown\": markdown}]],\n \"sections\": [[10, 0]]})\n return updated\n\n return kwargs\n"
] | class PostController(Controller):
"""
Controller extension for managing posts.
"""
def __init__(self, ghost):
"""
Initialize a new controller for posts.
:param ghost: An instance of the API client
"""
super(PostController, self).__init__(ghost, 'posts', model_type=Post)
def create(self, **kwargs):
"""
Creates a new post.
When the `markdown` property is present, it will be
automatically converted to `mobiledoc` on v1.+ of the server.
:param kwargs: The properties of the post
:return: The created `Post` object
"""
return super(PostController, self).create(**self._with_markdown(kwargs))
def _with_markdown(self, kwargs):
markdown = kwargs.pop('markdown', None)
if markdown:
if self.ghost.version.startswith('0'):
# put it back as is for version 0.x
kwargs['markdown'] = markdown
else:
updated = dict(kwargs)
updated['mobiledoc'] = json.dumps({
"version": "0.3.1", "markups": [], "atoms": [],
"cards": [["card-markdown", {"cardName": "card-markdown", "markdown": markdown}]],
"sections": [[10, 0]]})
return updated
return kwargs
|
rycus86/ghost-client | ghost_client/helpers.py | refresh_session_if_necessary | python | def refresh_session_if_necessary(f):
@functools.wraps(f)
def wrapped(self, *args, **kwargs):
try:
result = f(self, *args, **kwargs)
except Exception as ex:
if hasattr(ex, 'code') and ex.code in (401, 403):
self.refresh_session()
# retry now
result = f(self, *args, **kwargs)
else:
raise ex
return result
return wrapped | Decorator to use on methods that are allowed
to retry the request after reauthenticating the client.
:param f: The original function
:return: The decorated function | train | https://github.com/rycus86/ghost-client/blob/863d332801d2c1b8e7ad4573c7b16db78a7f8c8d/ghost_client/helpers.py#L4-L27 | null | import functools
|
rycus86/ghost-client | ghost_client/api.py | Ghost.from_sqlite | python | def from_sqlite(cls, database_path, base_url, version='auto', client_id='ghost-admin'):
import os
import sqlite3
fd = os.open(database_path, os.O_RDONLY)
connection = sqlite3.connect('/dev/fd/%d' % fd)
os.close(fd)
try:
row = connection.execute(
'SELECT secret FROM clients WHERE slug = ?',
(client_id,)
).fetchone()
if row:
return cls(
base_url, version=version,
client_id=client_id, client_secret=row[0]
)
else:
raise GhostException(401, [{
'errorType': 'InternalError',
'message': 'No client_secret found for client_id: %s' % client_id
}])
finally:
connection.close() | Initialize a new Ghost API client,
reading the client ID and secret from the SQlite database.
:param database_path: The path to the database file.
:param base_url: The base url of the server
:param version: The server version to use (default: `auto`)
:param client_id: The client ID to look for in the database
:return: A new Ghost API client instance | train | https://github.com/rycus86/ghost-client/blob/863d332801d2c1b8e7ad4573c7b16db78a7f8c8d/ghost_client/api.py#L142-L180 | null | class Ghost(object):
"""
API client for the Ghost REST endpoints.
See https://api.ghost.org/ for the available parameters.
Sample usage:
from ghost_client import Ghost
# to read the client ID and secret from the database
ghost = Ghost.from_sqlite(
'/var/lib/ghost/content/data/ghost.db',
'http://localhost:2368'
)
# or to use a specific client ID and secret
ghost = Ghost(
'http://localhost:2368',
client_id='ghost-admin', client_secret='secret_key'
)
# log in
ghost.login('username', 'password')
# print the server's version
print(ghost.version)
# create a new tag
tag = ghost.tags.create(name='API sample')
# create a new post using it
post = ghost.posts.create(
title='Example post', slug='custom-slug',
markdown='', # yes, even on v1.+
custom_excerpt='An example post created from Python',
tags=[tag]
)
# list posts, tags and users
posts = ghost.posts.list(
status='all',
fields=('id', 'title', 'slug'),
formats=('html', 'mobiledoc', 'plaintext'),
)
tags = ghost.tags.list(fields='name', limit='all')
users = ghost.users.list(include='count.posts')
# use pagination
while posts:
for post in posts:
print(post)
posts = posts.next_page()
print(posts.total)
print(posts.pages)
# update a post & tag
updated_post = ghost.posts.update(post.id, title='Updated title')
updated_tag = ghost.tags.update(tag.id, name='Updated tag')
# note: creating, updating and deleting a user is not allowed by the API
# access fields as properties
print(post.title)
print(post.markdown) # needs formats='mobiledoc'
print(post.author.name) # needs include='author'
# delete a post & tag
ghost.posts.delete(post.id)
ghost.tags.delete(tag.id)
# upload an image
ghost.upload(file_obj=open('sample.png', 'rb'))
ghost.upload(file_path='/path/to/image.jpeg', 'rb')
ghost.upload(name='image.gif', data=open('local.gif', 'rb').read())
# log out
ghost.logout()
The logged in credentials will be saved in memory and
on HTTP 401 errors the client will attempt
to re-authenticate once automatically.
Responses are wrapped in `models.ModelList` and `models.Model`
types to allow pagination and retrieving fields as properties.
"""
DEFAULT_VERSION = '1'
"""
The default version to report when cannot be fetched.
"""
def __init__(
self, base_url, version='auto',
client_id=None, client_secret=None,
access_token=None, refresh_token=None
):
"""
Creates a new Ghost API client.
:param base_url: The base url of the server
:param version: The server version to use (default: `auto`)
:param client_id: Self-supplied client ID (optional)
:param client_secret: Self-supplied client secret (optional)
:param access_token: Self-supplied access token (optional)
:param refresh_token: Self-supplied refresh token (optional)
"""
self.base_url = '%s/ghost/api/v0.1' % base_url
self._version = version
self._client_id = client_id
self._client_secret = client_secret
self._access_token = access_token
self._refresh_token = refresh_token
self._username = None
self._password = None
if not self._client_id or not self._client_secret:
raise GhostException(401, [{
'errorType': 'InternalError',
'message': 'No client_id or client_secret given or found'
}])
self.posts = PostController(self)
self.tags = Controller(self, 'tags')
self.users = Controller(self, 'users')
@classmethod
@property
def version(self):
"""
:return: The version of the server when initialized as 'auto',
otherwise the version passed in at initialization
"""
if self._version != 'auto':
return self._version
if self._version == 'auto':
try:
data = self.execute_get('configuration/about/')
self._version = data['configuration'][0]['version']
except GhostException:
return self.DEFAULT_VERSION
return self._version
def login(self, username, password):
"""
Authenticate with the server.
:param username: The username of an existing user
:param password: The password for the user
:return: The authentication response from the REST endpoint
"""
data = self._authenticate(
grant_type='password',
username=username,
password=password,
client_id=self._client_id,
client_secret=self._client_secret
)
self._username = username
self._password = password
return data
def refresh_session(self):
"""
Re-authenticate using the refresh token if available.
Otherwise log in using the username and password
if it was used to authenticate initially.
:return: The authentication response or `None` if not available
"""
if not self._refresh_token:
if self._username and self._password:
return self.login(self._username, self._password)
return
return self._authenticate(
grant_type='refresh_token',
refresh_token=self._refresh_token,
client_id=self._client_id,
client_secret=self._client_secret
)
def _authenticate(self, **kwargs):
response = requests.post(
'%s/authentication/token' % self.base_url, data=kwargs
)
if response.status_code != 200:
raise GhostException(response.status_code, response.json().get('errors', []))
data = response.json()
self._access_token = data.get('access_token')
self._refresh_token = data.get('refresh_token')
return data
def revoke_access_token(self):
"""
Revoke the access token currently in use.
"""
if not self._access_token:
return
self.execute_post('authentication/revoke', json=dict(
token_type_hint='access_token',
token=self._access_token
))
self._access_token = None
def revoke_refresh_token(self):
"""
Revoke the refresh token currently active.
"""
if not self._refresh_token:
return
self.execute_post('authentication/revoke', json=dict(
token_type_hint='refresh_token',
token=self._refresh_token
))
self._refresh_token = None
def logout(self):
"""
Log out, revoking the access tokens
and forgetting the login details if they were given.
"""
self.revoke_refresh_token()
self.revoke_access_token()
self._username, self._password = None, None
def upload(self, file_obj=None, file_path=None, name=None, data=None):
"""
Upload an image and return its path on the server.
Either `file_obj` or `file_path` or `name` and `data` has to be specified.
:param file_obj: A file object to upload
:param file_path: A file path to upload from
:param name: A file name for uploading
:param data: The file content to upload
:return: The path of the uploaded file on the server
"""
close = False
if file_obj:
file_name, content = os.path.basename(file_obj.name), file_obj
elif file_path:
file_name, content = os.path.basename(file_path), open(file_path, 'rb')
close = True
elif name and data:
file_name, content = name, data
else:
raise GhostException(
400,
'Either `file_obj` or `file_path` or '
'`name` and `data` needs to be specified'
)
try:
content_type, _ = mimetypes.guess_type(file_name)
file_arg = (file_name, content, content_type)
response = self.execute_post('uploads/', files={'uploadimage': file_arg})
return response
finally:
if close:
content.close()
@refresh_session_if_necessary
def execute_get(self, resource, **kwargs):
"""
Execute an HTTP GET request against the API endpoints.
This method is meant for internal use.
:param resource: The last part of the URI
:param kwargs: Additional query parameters (and optionally headers)
:return: The HTTP response as JSON or `GhostException` if unsuccessful
"""
url = '%s/%s' % (self.base_url, resource)
headers = kwargs.pop('headers', dict())
headers['Accept'] = 'application/json'
headers['Content-Type'] = 'application/json'
if kwargs:
separator = '&' if '?' in url else '?'
for key, value in kwargs.items():
if hasattr(value, '__iter__') and type(value) not in six.string_types:
url = '%s%s%s=%s' % (url, separator, key, ','.join(value))
else:
url = '%s%s%s=%s' % (url, separator, key, value)
separator = '&'
if self._access_token:
headers['Authorization'] = 'Bearer %s' % self._access_token
else:
separator = '&' if '?' in url else '?'
url = '%s%sclient_id=%s&client_secret=%s' % (
url, separator, self._client_id, self._client_secret
)
response = requests.get(url, headers=headers)
if response.status_code // 100 != 2:
raise GhostException(response.status_code, response.json().get('errors', []))
return response.json()
def execute_post(self, resource, **kwargs):
"""
Execute an HTTP POST request against the API endpoints.
This method is meant for internal use.
:param resource: The last part of the URI
:param kwargs: Additional parameters for the HTTP call (`request` library)
:return: The HTTP response as JSON or `GhostException` if unsuccessful
"""
return self._request(resource, requests.post, **kwargs).json()
def execute_put(self, resource, **kwargs):
"""
Execute an HTTP PUT request against the API endpoints.
This method is meant for internal use.
:param resource: The last part of the URI
:param kwargs: Additional parameters for the HTTP call (`request` library)
:return: The HTTP response as JSON or `GhostException` if unsuccessful
"""
return self._request(resource, requests.put, **kwargs).json()
def execute_delete(self, resource, **kwargs):
"""
Execute an HTTP DELETE request against the API endpoints.
This method is meant for internal use.
Does not return anything but raises an exception when failed.
:param resource: The last part of the URI
:param kwargs: Additional parameters for the HTTP call (`request` library)
"""
self._request(resource, requests.delete, **kwargs)
@refresh_session_if_necessary
def _request(self, resource, request, **kwargs):
if not self._access_token:
raise GhostException(401, [{
'errorType': 'ClientError',
'message': 'Access token not found'
}])
url = '%s/%s' % (self.base_url, resource)
headers = kwargs.pop('headers', dict())
if 'json' in kwargs:
headers['Accept'] = 'application/json'
headers['Content-Type'] = 'application/json'
if self._access_token:
headers['Authorization'] = 'Bearer %s' % self._access_token
response = request(url, headers=headers, **kwargs)
if response.status_code // 100 != 2:
raise GhostException(response.status_code, response.json().get('errors', []))
return response
|
rycus86/ghost-client | ghost_client/api.py | Ghost.version | python | def version(self):
if self._version != 'auto':
return self._version
if self._version == 'auto':
try:
data = self.execute_get('configuration/about/')
self._version = data['configuration'][0]['version']
except GhostException:
return self.DEFAULT_VERSION
return self._version | :return: The version of the server when initialized as 'auto',
otherwise the version passed in at initialization | train | https://github.com/rycus86/ghost-client/blob/863d332801d2c1b8e7ad4573c7b16db78a7f8c8d/ghost_client/api.py#L183-L199 | null | class Ghost(object):
"""
API client for the Ghost REST endpoints.
See https://api.ghost.org/ for the available parameters.
Sample usage:
from ghost_client import Ghost
# to read the client ID and secret from the database
ghost = Ghost.from_sqlite(
'/var/lib/ghost/content/data/ghost.db',
'http://localhost:2368'
)
# or to use a specific client ID and secret
ghost = Ghost(
'http://localhost:2368',
client_id='ghost-admin', client_secret='secret_key'
)
# log in
ghost.login('username', 'password')
# print the server's version
print(ghost.version)
# create a new tag
tag = ghost.tags.create(name='API sample')
# create a new post using it
post = ghost.posts.create(
title='Example post', slug='custom-slug',
markdown='', # yes, even on v1.+
custom_excerpt='An example post created from Python',
tags=[tag]
)
# list posts, tags and users
posts = ghost.posts.list(
status='all',
fields=('id', 'title', 'slug'),
formats=('html', 'mobiledoc', 'plaintext'),
)
tags = ghost.tags.list(fields='name', limit='all')
users = ghost.users.list(include='count.posts')
# use pagination
while posts:
for post in posts:
print(post)
posts = posts.next_page()
print(posts.total)
print(posts.pages)
# update a post & tag
updated_post = ghost.posts.update(post.id, title='Updated title')
updated_tag = ghost.tags.update(tag.id, name='Updated tag')
# note: creating, updating and deleting a user is not allowed by the API
# access fields as properties
print(post.title)
print(post.markdown) # needs formats='mobiledoc'
print(post.author.name) # needs include='author'
# delete a post & tag
ghost.posts.delete(post.id)
ghost.tags.delete(tag.id)
# upload an image
ghost.upload(file_obj=open('sample.png', 'rb'))
ghost.upload(file_path='/path/to/image.jpeg', 'rb')
ghost.upload(name='image.gif', data=open('local.gif', 'rb').read())
# log out
ghost.logout()
The logged in credentials will be saved in memory and
on HTTP 401 errors the client will attempt
to re-authenticate once automatically.
Responses are wrapped in `models.ModelList` and `models.Model`
types to allow pagination and retrieving fields as properties.
"""
DEFAULT_VERSION = '1'
"""
The default version to report when cannot be fetched.
"""
def __init__(
self, base_url, version='auto',
client_id=None, client_secret=None,
access_token=None, refresh_token=None
):
"""
Creates a new Ghost API client.
:param base_url: The base url of the server
:param version: The server version to use (default: `auto`)
:param client_id: Self-supplied client ID (optional)
:param client_secret: Self-supplied client secret (optional)
:param access_token: Self-supplied access token (optional)
:param refresh_token: Self-supplied refresh token (optional)
"""
self.base_url = '%s/ghost/api/v0.1' % base_url
self._version = version
self._client_id = client_id
self._client_secret = client_secret
self._access_token = access_token
self._refresh_token = refresh_token
self._username = None
self._password = None
if not self._client_id or not self._client_secret:
raise GhostException(401, [{
'errorType': 'InternalError',
'message': 'No client_id or client_secret given or found'
}])
self.posts = PostController(self)
self.tags = Controller(self, 'tags')
self.users = Controller(self, 'users')
@classmethod
def from_sqlite(cls, database_path, base_url, version='auto', client_id='ghost-admin'):
"""
Initialize a new Ghost API client,
reading the client ID and secret from the SQlite database.
:param database_path: The path to the database file.
:param base_url: The base url of the server
:param version: The server version to use (default: `auto`)
:param client_id: The client ID to look for in the database
:return: A new Ghost API client instance
"""
import os
import sqlite3
fd = os.open(database_path, os.O_RDONLY)
connection = sqlite3.connect('/dev/fd/%d' % fd)
os.close(fd)
try:
row = connection.execute(
'SELECT secret FROM clients WHERE slug = ?',
(client_id,)
).fetchone()
if row:
return cls(
base_url, version=version,
client_id=client_id, client_secret=row[0]
)
else:
raise GhostException(401, [{
'errorType': 'InternalError',
'message': 'No client_secret found for client_id: %s' % client_id
}])
finally:
connection.close()
@property
def login(self, username, password):
"""
Authenticate with the server.
:param username: The username of an existing user
:param password: The password for the user
:return: The authentication response from the REST endpoint
"""
data = self._authenticate(
grant_type='password',
username=username,
password=password,
client_id=self._client_id,
client_secret=self._client_secret
)
self._username = username
self._password = password
return data
def refresh_session(self):
"""
Re-authenticate using the refresh token if available.
Otherwise log in using the username and password
if it was used to authenticate initially.
:return: The authentication response or `None` if not available
"""
if not self._refresh_token:
if self._username and self._password:
return self.login(self._username, self._password)
return
return self._authenticate(
grant_type='refresh_token',
refresh_token=self._refresh_token,
client_id=self._client_id,
client_secret=self._client_secret
)
def _authenticate(self, **kwargs):
response = requests.post(
'%s/authentication/token' % self.base_url, data=kwargs
)
if response.status_code != 200:
raise GhostException(response.status_code, response.json().get('errors', []))
data = response.json()
self._access_token = data.get('access_token')
self._refresh_token = data.get('refresh_token')
return data
def revoke_access_token(self):
"""
Revoke the access token currently in use.
"""
if not self._access_token:
return
self.execute_post('authentication/revoke', json=dict(
token_type_hint='access_token',
token=self._access_token
))
self._access_token = None
def revoke_refresh_token(self):
"""
Revoke the refresh token currently active.
"""
if not self._refresh_token:
return
self.execute_post('authentication/revoke', json=dict(
token_type_hint='refresh_token',
token=self._refresh_token
))
self._refresh_token = None
def logout(self):
"""
Log out, revoking the access tokens
and forgetting the login details if they were given.
"""
self.revoke_refresh_token()
self.revoke_access_token()
self._username, self._password = None, None
def upload(self, file_obj=None, file_path=None, name=None, data=None):
"""
Upload an image and return its path on the server.
Either `file_obj` or `file_path` or `name` and `data` has to be specified.
:param file_obj: A file object to upload
:param file_path: A file path to upload from
:param name: A file name for uploading
:param data: The file content to upload
:return: The path of the uploaded file on the server
"""
close = False
if file_obj:
file_name, content = os.path.basename(file_obj.name), file_obj
elif file_path:
file_name, content = os.path.basename(file_path), open(file_path, 'rb')
close = True
elif name and data:
file_name, content = name, data
else:
raise GhostException(
400,
'Either `file_obj` or `file_path` or '
'`name` and `data` needs to be specified'
)
try:
content_type, _ = mimetypes.guess_type(file_name)
file_arg = (file_name, content, content_type)
response = self.execute_post('uploads/', files={'uploadimage': file_arg})
return response
finally:
if close:
content.close()
@refresh_session_if_necessary
def execute_get(self, resource, **kwargs):
"""
Execute an HTTP GET request against the API endpoints.
This method is meant for internal use.
:param resource: The last part of the URI
:param kwargs: Additional query parameters (and optionally headers)
:return: The HTTP response as JSON or `GhostException` if unsuccessful
"""
url = '%s/%s' % (self.base_url, resource)
headers = kwargs.pop('headers', dict())
headers['Accept'] = 'application/json'
headers['Content-Type'] = 'application/json'
if kwargs:
separator = '&' if '?' in url else '?'
for key, value in kwargs.items():
if hasattr(value, '__iter__') and type(value) not in six.string_types:
url = '%s%s%s=%s' % (url, separator, key, ','.join(value))
else:
url = '%s%s%s=%s' % (url, separator, key, value)
separator = '&'
if self._access_token:
headers['Authorization'] = 'Bearer %s' % self._access_token
else:
separator = '&' if '?' in url else '?'
url = '%s%sclient_id=%s&client_secret=%s' % (
url, separator, self._client_id, self._client_secret
)
response = requests.get(url, headers=headers)
if response.status_code // 100 != 2:
raise GhostException(response.status_code, response.json().get('errors', []))
return response.json()
def execute_post(self, resource, **kwargs):
"""
Execute an HTTP POST request against the API endpoints.
This method is meant for internal use.
:param resource: The last part of the URI
:param kwargs: Additional parameters for the HTTP call (`request` library)
:return: The HTTP response as JSON or `GhostException` if unsuccessful
"""
return self._request(resource, requests.post, **kwargs).json()
def execute_put(self, resource, **kwargs):
"""
Execute an HTTP PUT request against the API endpoints.
This method is meant for internal use.
:param resource: The last part of the URI
:param kwargs: Additional parameters for the HTTP call (`request` library)
:return: The HTTP response as JSON or `GhostException` if unsuccessful
"""
return self._request(resource, requests.put, **kwargs).json()
def execute_delete(self, resource, **kwargs):
"""
Execute an HTTP DELETE request against the API endpoints.
This method is meant for internal use.
Does not return anything but raises an exception when failed.
:param resource: The last part of the URI
:param kwargs: Additional parameters for the HTTP call (`request` library)
"""
self._request(resource, requests.delete, **kwargs)
@refresh_session_if_necessary
def _request(self, resource, request, **kwargs):
if not self._access_token:
raise GhostException(401, [{
'errorType': 'ClientError',
'message': 'Access token not found'
}])
url = '%s/%s' % (self.base_url, resource)
headers = kwargs.pop('headers', dict())
if 'json' in kwargs:
headers['Accept'] = 'application/json'
headers['Content-Type'] = 'application/json'
if self._access_token:
headers['Authorization'] = 'Bearer %s' % self._access_token
response = request(url, headers=headers, **kwargs)
if response.status_code // 100 != 2:
raise GhostException(response.status_code, response.json().get('errors', []))
return response
|
rycus86/ghost-client | ghost_client/api.py | Ghost.login | python | def login(self, username, password):
data = self._authenticate(
grant_type='password',
username=username,
password=password,
client_id=self._client_id,
client_secret=self._client_secret
)
self._username = username
self._password = password
return data | Authenticate with the server.
:param username: The username of an existing user
:param password: The password for the user
:return: The authentication response from the REST endpoint | train | https://github.com/rycus86/ghost-client/blob/863d332801d2c1b8e7ad4573c7b16db78a7f8c8d/ghost_client/api.py#L201-L221 | [
"def _authenticate(self, **kwargs):\n response = requests.post(\n '%s/authentication/token' % self.base_url, data=kwargs\n )\n\n if response.status_code != 200:\n raise GhostException(response.status_code, response.json().get('errors', []))\n\n data = response.json()\n\n self._access_token = data.get('access_token')\n self._refresh_token = data.get('refresh_token')\n\n return data\n"
] | class Ghost(object):
"""
API client for the Ghost REST endpoints.
See https://api.ghost.org/ for the available parameters.
Sample usage:
from ghost_client import Ghost
# to read the client ID and secret from the database
ghost = Ghost.from_sqlite(
'/var/lib/ghost/content/data/ghost.db',
'http://localhost:2368'
)
# or to use a specific client ID and secret
ghost = Ghost(
'http://localhost:2368',
client_id='ghost-admin', client_secret='secret_key'
)
# log in
ghost.login('username', 'password')
# print the server's version
print(ghost.version)
# create a new tag
tag = ghost.tags.create(name='API sample')
# create a new post using it
post = ghost.posts.create(
title='Example post', slug='custom-slug',
markdown='', # yes, even on v1.+
custom_excerpt='An example post created from Python',
tags=[tag]
)
# list posts, tags and users
posts = ghost.posts.list(
status='all',
fields=('id', 'title', 'slug'),
formats=('html', 'mobiledoc', 'plaintext'),
)
tags = ghost.tags.list(fields='name', limit='all')
users = ghost.users.list(include='count.posts')
# use pagination
while posts:
for post in posts:
print(post)
posts = posts.next_page()
print(posts.total)
print(posts.pages)
# update a post & tag
updated_post = ghost.posts.update(post.id, title='Updated title')
updated_tag = ghost.tags.update(tag.id, name='Updated tag')
# note: creating, updating and deleting a user is not allowed by the API
# access fields as properties
print(post.title)
print(post.markdown) # needs formats='mobiledoc'
print(post.author.name) # needs include='author'
# delete a post & tag
ghost.posts.delete(post.id)
ghost.tags.delete(tag.id)
# upload an image
ghost.upload(file_obj=open('sample.png', 'rb'))
ghost.upload(file_path='/path/to/image.jpeg', 'rb')
ghost.upload(name='image.gif', data=open('local.gif', 'rb').read())
# log out
ghost.logout()
The logged in credentials will be saved in memory and
on HTTP 401 errors the client will attempt
to re-authenticate once automatically.
Responses are wrapped in `models.ModelList` and `models.Model`
types to allow pagination and retrieving fields as properties.
"""
DEFAULT_VERSION = '1'
"""
The default version to report when cannot be fetched.
"""
def __init__(
self, base_url, version='auto',
client_id=None, client_secret=None,
access_token=None, refresh_token=None
):
"""
Creates a new Ghost API client.
:param base_url: The base url of the server
:param version: The server version to use (default: `auto`)
:param client_id: Self-supplied client ID (optional)
:param client_secret: Self-supplied client secret (optional)
:param access_token: Self-supplied access token (optional)
:param refresh_token: Self-supplied refresh token (optional)
"""
self.base_url = '%s/ghost/api/v0.1' % base_url
self._version = version
self._client_id = client_id
self._client_secret = client_secret
self._access_token = access_token
self._refresh_token = refresh_token
self._username = None
self._password = None
if not self._client_id or not self._client_secret:
raise GhostException(401, [{
'errorType': 'InternalError',
'message': 'No client_id or client_secret given or found'
}])
self.posts = PostController(self)
self.tags = Controller(self, 'tags')
self.users = Controller(self, 'users')
@classmethod
def from_sqlite(cls, database_path, base_url, version='auto', client_id='ghost-admin'):
"""
Initialize a new Ghost API client,
reading the client ID and secret from the SQlite database.
:param database_path: The path to the database file.
:param base_url: The base url of the server
:param version: The server version to use (default: `auto`)
:param client_id: The client ID to look for in the database
:return: A new Ghost API client instance
"""
import os
import sqlite3
fd = os.open(database_path, os.O_RDONLY)
connection = sqlite3.connect('/dev/fd/%d' % fd)
os.close(fd)
try:
row = connection.execute(
'SELECT secret FROM clients WHERE slug = ?',
(client_id,)
).fetchone()
if row:
return cls(
base_url, version=version,
client_id=client_id, client_secret=row[0]
)
else:
raise GhostException(401, [{
'errorType': 'InternalError',
'message': 'No client_secret found for client_id: %s' % client_id
}])
finally:
connection.close()
@property
def version(self):
"""
:return: The version of the server when initialized as 'auto',
otherwise the version passed in at initialization
"""
if self._version != 'auto':
return self._version
if self._version == 'auto':
try:
data = self.execute_get('configuration/about/')
self._version = data['configuration'][0]['version']
except GhostException:
return self.DEFAULT_VERSION
return self._version
def refresh_session(self):
"""
Re-authenticate using the refresh token if available.
Otherwise log in using the username and password
if it was used to authenticate initially.
:return: The authentication response or `None` if not available
"""
if not self._refresh_token:
if self._username and self._password:
return self.login(self._username, self._password)
return
return self._authenticate(
grant_type='refresh_token',
refresh_token=self._refresh_token,
client_id=self._client_id,
client_secret=self._client_secret
)
def _authenticate(self, **kwargs):
response = requests.post(
'%s/authentication/token' % self.base_url, data=kwargs
)
if response.status_code != 200:
raise GhostException(response.status_code, response.json().get('errors', []))
data = response.json()
self._access_token = data.get('access_token')
self._refresh_token = data.get('refresh_token')
return data
def revoke_access_token(self):
"""
Revoke the access token currently in use.
"""
if not self._access_token:
return
self.execute_post('authentication/revoke', json=dict(
token_type_hint='access_token',
token=self._access_token
))
self._access_token = None
def revoke_refresh_token(self):
"""
Revoke the refresh token currently active.
"""
if not self._refresh_token:
return
self.execute_post('authentication/revoke', json=dict(
token_type_hint='refresh_token',
token=self._refresh_token
))
self._refresh_token = None
def logout(self):
"""
Log out, revoking the access tokens
and forgetting the login details if they were given.
"""
self.revoke_refresh_token()
self.revoke_access_token()
self._username, self._password = None, None
def upload(self, file_obj=None, file_path=None, name=None, data=None):
"""
Upload an image and return its path on the server.
Either `file_obj` or `file_path` or `name` and `data` has to be specified.
:param file_obj: A file object to upload
:param file_path: A file path to upload from
:param name: A file name for uploading
:param data: The file content to upload
:return: The path of the uploaded file on the server
"""
close = False
if file_obj:
file_name, content = os.path.basename(file_obj.name), file_obj
elif file_path:
file_name, content = os.path.basename(file_path), open(file_path, 'rb')
close = True
elif name and data:
file_name, content = name, data
else:
raise GhostException(
400,
'Either `file_obj` or `file_path` or '
'`name` and `data` needs to be specified'
)
try:
content_type, _ = mimetypes.guess_type(file_name)
file_arg = (file_name, content, content_type)
response = self.execute_post('uploads/', files={'uploadimage': file_arg})
return response
finally:
if close:
content.close()
@refresh_session_if_necessary
def execute_get(self, resource, **kwargs):
"""
Execute an HTTP GET request against the API endpoints.
This method is meant for internal use.
:param resource: The last part of the URI
:param kwargs: Additional query parameters (and optionally headers)
:return: The HTTP response as JSON or `GhostException` if unsuccessful
"""
url = '%s/%s' % (self.base_url, resource)
headers = kwargs.pop('headers', dict())
headers['Accept'] = 'application/json'
headers['Content-Type'] = 'application/json'
if kwargs:
separator = '&' if '?' in url else '?'
for key, value in kwargs.items():
if hasattr(value, '__iter__') and type(value) not in six.string_types:
url = '%s%s%s=%s' % (url, separator, key, ','.join(value))
else:
url = '%s%s%s=%s' % (url, separator, key, value)
separator = '&'
if self._access_token:
headers['Authorization'] = 'Bearer %s' % self._access_token
else:
separator = '&' if '?' in url else '?'
url = '%s%sclient_id=%s&client_secret=%s' % (
url, separator, self._client_id, self._client_secret
)
response = requests.get(url, headers=headers)
if response.status_code // 100 != 2:
raise GhostException(response.status_code, response.json().get('errors', []))
return response.json()
def execute_post(self, resource, **kwargs):
"""
Execute an HTTP POST request against the API endpoints.
This method is meant for internal use.
:param resource: The last part of the URI
:param kwargs: Additional parameters for the HTTP call (`request` library)
:return: The HTTP response as JSON or `GhostException` if unsuccessful
"""
return self._request(resource, requests.post, **kwargs).json()
def execute_put(self, resource, **kwargs):
"""
Execute an HTTP PUT request against the API endpoints.
This method is meant for internal use.
:param resource: The last part of the URI
:param kwargs: Additional parameters for the HTTP call (`request` library)
:return: The HTTP response as JSON or `GhostException` if unsuccessful
"""
return self._request(resource, requests.put, **kwargs).json()
def execute_delete(self, resource, **kwargs):
"""
Execute an HTTP DELETE request against the API endpoints.
This method is meant for internal use.
Does not return anything but raises an exception when failed.
:param resource: The last part of the URI
:param kwargs: Additional parameters for the HTTP call (`request` library)
"""
self._request(resource, requests.delete, **kwargs)
@refresh_session_if_necessary
def _request(self, resource, request, **kwargs):
if not self._access_token:
raise GhostException(401, [{
'errorType': 'ClientError',
'message': 'Access token not found'
}])
url = '%s/%s' % (self.base_url, resource)
headers = kwargs.pop('headers', dict())
if 'json' in kwargs:
headers['Accept'] = 'application/json'
headers['Content-Type'] = 'application/json'
if self._access_token:
headers['Authorization'] = 'Bearer %s' % self._access_token
response = request(url, headers=headers, **kwargs)
if response.status_code // 100 != 2:
raise GhostException(response.status_code, response.json().get('errors', []))
return response
|
rycus86/ghost-client | ghost_client/api.py | Ghost.refresh_session | python | def refresh_session(self):
if not self._refresh_token:
if self._username and self._password:
return self.login(self._username, self._password)
return
return self._authenticate(
grant_type='refresh_token',
refresh_token=self._refresh_token,
client_id=self._client_id,
client_secret=self._client_secret
) | Re-authenticate using the refresh token if available.
Otherwise log in using the username and password
if it was used to authenticate initially.
:return: The authentication response or `None` if not available | train | https://github.com/rycus86/ghost-client/blob/863d332801d2c1b8e7ad4573c7b16db78a7f8c8d/ghost_client/api.py#L223-L243 | [
"def login(self, username, password):\n \"\"\"\n Authenticate with the server.\n\n :param username: The username of an existing user\n :param password: The password for the user\n :return: The authentication response from the REST endpoint\n \"\"\"\n\n data = self._authenticate(\n grant_type='password',\n username=username,\n password=password,\n client_id=self._client_id,\n client_secret=self._client_secret\n )\n\n self._username = username\n self._password = password\n\n return data\n",
"def _authenticate(self, **kwargs):\n response = requests.post(\n '%s/authentication/token' % self.base_url, data=kwargs\n )\n\n if response.status_code != 200:\n raise GhostException(response.status_code, response.json().get('errors', []))\n\n data = response.json()\n\n self._access_token = data.get('access_token')\n self._refresh_token = data.get('refresh_token')\n\n return data\n"
] | class Ghost(object):
"""
API client for the Ghost REST endpoints.
See https://api.ghost.org/ for the available parameters.
Sample usage:
from ghost_client import Ghost
# to read the client ID and secret from the database
ghost = Ghost.from_sqlite(
'/var/lib/ghost/content/data/ghost.db',
'http://localhost:2368'
)
# or to use a specific client ID and secret
ghost = Ghost(
'http://localhost:2368',
client_id='ghost-admin', client_secret='secret_key'
)
# log in
ghost.login('username', 'password')
# print the server's version
print(ghost.version)
# create a new tag
tag = ghost.tags.create(name='API sample')
# create a new post using it
post = ghost.posts.create(
title='Example post', slug='custom-slug',
markdown='', # yes, even on v1.+
custom_excerpt='An example post created from Python',
tags=[tag]
)
# list posts, tags and users
posts = ghost.posts.list(
status='all',
fields=('id', 'title', 'slug'),
formats=('html', 'mobiledoc', 'plaintext'),
)
tags = ghost.tags.list(fields='name', limit='all')
users = ghost.users.list(include='count.posts')
# use pagination
while posts:
for post in posts:
print(post)
posts = posts.next_page()
print(posts.total)
print(posts.pages)
# update a post & tag
updated_post = ghost.posts.update(post.id, title='Updated title')
updated_tag = ghost.tags.update(tag.id, name='Updated tag')
# note: creating, updating and deleting a user is not allowed by the API
# access fields as properties
print(post.title)
print(post.markdown) # needs formats='mobiledoc'
print(post.author.name) # needs include='author'
# delete a post & tag
ghost.posts.delete(post.id)
ghost.tags.delete(tag.id)
# upload an image
ghost.upload(file_obj=open('sample.png', 'rb'))
ghost.upload(file_path='/path/to/image.jpeg', 'rb')
ghost.upload(name='image.gif', data=open('local.gif', 'rb').read())
# log out
ghost.logout()
The logged in credentials will be saved in memory and
on HTTP 401 errors the client will attempt
to re-authenticate once automatically.
Responses are wrapped in `models.ModelList` and `models.Model`
types to allow pagination and retrieving fields as properties.
"""
DEFAULT_VERSION = '1'
"""
The default version to report when cannot be fetched.
"""
def __init__(
self, base_url, version='auto',
client_id=None, client_secret=None,
access_token=None, refresh_token=None
):
"""
Creates a new Ghost API client.
:param base_url: The base url of the server
:param version: The server version to use (default: `auto`)
:param client_id: Self-supplied client ID (optional)
:param client_secret: Self-supplied client secret (optional)
:param access_token: Self-supplied access token (optional)
:param refresh_token: Self-supplied refresh token (optional)
"""
self.base_url = '%s/ghost/api/v0.1' % base_url
self._version = version
self._client_id = client_id
self._client_secret = client_secret
self._access_token = access_token
self._refresh_token = refresh_token
self._username = None
self._password = None
if not self._client_id or not self._client_secret:
raise GhostException(401, [{
'errorType': 'InternalError',
'message': 'No client_id or client_secret given or found'
}])
self.posts = PostController(self)
self.tags = Controller(self, 'tags')
self.users = Controller(self, 'users')
@classmethod
def from_sqlite(cls, database_path, base_url, version='auto', client_id='ghost-admin'):
"""
Initialize a new Ghost API client,
reading the client ID and secret from the SQlite database.
:param database_path: The path to the database file.
:param base_url: The base url of the server
:param version: The server version to use (default: `auto`)
:param client_id: The client ID to look for in the database
:return: A new Ghost API client instance
"""
import os
import sqlite3
fd = os.open(database_path, os.O_RDONLY)
connection = sqlite3.connect('/dev/fd/%d' % fd)
os.close(fd)
try:
row = connection.execute(
'SELECT secret FROM clients WHERE slug = ?',
(client_id,)
).fetchone()
if row:
return cls(
base_url, version=version,
client_id=client_id, client_secret=row[0]
)
else:
raise GhostException(401, [{
'errorType': 'InternalError',
'message': 'No client_secret found for client_id: %s' % client_id
}])
finally:
connection.close()
@property
def version(self):
"""
:return: The version of the server when initialized as 'auto',
otherwise the version passed in at initialization
"""
if self._version != 'auto':
return self._version
if self._version == 'auto':
try:
data = self.execute_get('configuration/about/')
self._version = data['configuration'][0]['version']
except GhostException:
return self.DEFAULT_VERSION
return self._version
def login(self, username, password):
"""
Authenticate with the server.
:param username: The username of an existing user
:param password: The password for the user
:return: The authentication response from the REST endpoint
"""
data = self._authenticate(
grant_type='password',
username=username,
password=password,
client_id=self._client_id,
client_secret=self._client_secret
)
self._username = username
self._password = password
return data
def _authenticate(self, **kwargs):
response = requests.post(
'%s/authentication/token' % self.base_url, data=kwargs
)
if response.status_code != 200:
raise GhostException(response.status_code, response.json().get('errors', []))
data = response.json()
self._access_token = data.get('access_token')
self._refresh_token = data.get('refresh_token')
return data
def revoke_access_token(self):
"""
Revoke the access token currently in use.
"""
if not self._access_token:
return
self.execute_post('authentication/revoke', json=dict(
token_type_hint='access_token',
token=self._access_token
))
self._access_token = None
def revoke_refresh_token(self):
"""
Revoke the refresh token currently active.
"""
if not self._refresh_token:
return
self.execute_post('authentication/revoke', json=dict(
token_type_hint='refresh_token',
token=self._refresh_token
))
self._refresh_token = None
def logout(self):
"""
Log out, revoking the access tokens
and forgetting the login details if they were given.
"""
self.revoke_refresh_token()
self.revoke_access_token()
self._username, self._password = None, None
def upload(self, file_obj=None, file_path=None, name=None, data=None):
"""
Upload an image and return its path on the server.
Either `file_obj` or `file_path` or `name` and `data` has to be specified.
:param file_obj: A file object to upload
:param file_path: A file path to upload from
:param name: A file name for uploading
:param data: The file content to upload
:return: The path of the uploaded file on the server
"""
close = False
if file_obj:
file_name, content = os.path.basename(file_obj.name), file_obj
elif file_path:
file_name, content = os.path.basename(file_path), open(file_path, 'rb')
close = True
elif name and data:
file_name, content = name, data
else:
raise GhostException(
400,
'Either `file_obj` or `file_path` or '
'`name` and `data` needs to be specified'
)
try:
content_type, _ = mimetypes.guess_type(file_name)
file_arg = (file_name, content, content_type)
response = self.execute_post('uploads/', files={'uploadimage': file_arg})
return response
finally:
if close:
content.close()
@refresh_session_if_necessary
def execute_get(self, resource, **kwargs):
"""
Execute an HTTP GET request against the API endpoints.
This method is meant for internal use.
:param resource: The last part of the URI
:param kwargs: Additional query parameters (and optionally headers)
:return: The HTTP response as JSON or `GhostException` if unsuccessful
"""
url = '%s/%s' % (self.base_url, resource)
headers = kwargs.pop('headers', dict())
headers['Accept'] = 'application/json'
headers['Content-Type'] = 'application/json'
if kwargs:
separator = '&' if '?' in url else '?'
for key, value in kwargs.items():
if hasattr(value, '__iter__') and type(value) not in six.string_types:
url = '%s%s%s=%s' % (url, separator, key, ','.join(value))
else:
url = '%s%s%s=%s' % (url, separator, key, value)
separator = '&'
if self._access_token:
headers['Authorization'] = 'Bearer %s' % self._access_token
else:
separator = '&' if '?' in url else '?'
url = '%s%sclient_id=%s&client_secret=%s' % (
url, separator, self._client_id, self._client_secret
)
response = requests.get(url, headers=headers)
if response.status_code // 100 != 2:
raise GhostException(response.status_code, response.json().get('errors', []))
return response.json()
def execute_post(self, resource, **kwargs):
"""
Execute an HTTP POST request against the API endpoints.
This method is meant for internal use.
:param resource: The last part of the URI
:param kwargs: Additional parameters for the HTTP call (`request` library)
:return: The HTTP response as JSON or `GhostException` if unsuccessful
"""
return self._request(resource, requests.post, **kwargs).json()
def execute_put(self, resource, **kwargs):
"""
Execute an HTTP PUT request against the API endpoints.
This method is meant for internal use.
:param resource: The last part of the URI
:param kwargs: Additional parameters for the HTTP call (`request` library)
:return: The HTTP response as JSON or `GhostException` if unsuccessful
"""
return self._request(resource, requests.put, **kwargs).json()
def execute_delete(self, resource, **kwargs):
"""
Execute an HTTP DELETE request against the API endpoints.
This method is meant for internal use.
Does not return anything but raises an exception when failed.
:param resource: The last part of the URI
:param kwargs: Additional parameters for the HTTP call (`request` library)
"""
self._request(resource, requests.delete, **kwargs)
@refresh_session_if_necessary
def _request(self, resource, request, **kwargs):
if not self._access_token:
raise GhostException(401, [{
'errorType': 'ClientError',
'message': 'Access token not found'
}])
url = '%s/%s' % (self.base_url, resource)
headers = kwargs.pop('headers', dict())
if 'json' in kwargs:
headers['Accept'] = 'application/json'
headers['Content-Type'] = 'application/json'
if self._access_token:
headers['Authorization'] = 'Bearer %s' % self._access_token
response = request(url, headers=headers, **kwargs)
if response.status_code // 100 != 2:
raise GhostException(response.status_code, response.json().get('errors', []))
return response
|
rycus86/ghost-client | ghost_client/api.py | Ghost.revoke_access_token | python | def revoke_access_token(self):
if not self._access_token:
return
self.execute_post('authentication/revoke', json=dict(
token_type_hint='access_token',
token=self._access_token
))
self._access_token = None | Revoke the access token currently in use. | train | https://github.com/rycus86/ghost-client/blob/863d332801d2c1b8e7ad4573c7b16db78a7f8c8d/ghost_client/api.py#L260-L273 | [
"def execute_post(self, resource, **kwargs):\n \"\"\"\n Execute an HTTP POST request against the API endpoints.\n This method is meant for internal use.\n\n :param resource: The last part of the URI\n :param kwargs: Additional parameters for the HTTP call (`request` library)\n :return: The HTTP response as JSON or `GhostException` if unsuccessful\n \"\"\"\n\n return self._request(resource, requests.post, **kwargs).json()\n"
] | class Ghost(object):
"""
API client for the Ghost REST endpoints.
See https://api.ghost.org/ for the available parameters.
Sample usage:
from ghost_client import Ghost
# to read the client ID and secret from the database
ghost = Ghost.from_sqlite(
'/var/lib/ghost/content/data/ghost.db',
'http://localhost:2368'
)
# or to use a specific client ID and secret
ghost = Ghost(
'http://localhost:2368',
client_id='ghost-admin', client_secret='secret_key'
)
# log in
ghost.login('username', 'password')
# print the server's version
print(ghost.version)
# create a new tag
tag = ghost.tags.create(name='API sample')
# create a new post using it
post = ghost.posts.create(
title='Example post', slug='custom-slug',
markdown='', # yes, even on v1.+
custom_excerpt='An example post created from Python',
tags=[tag]
)
# list posts, tags and users
posts = ghost.posts.list(
status='all',
fields=('id', 'title', 'slug'),
formats=('html', 'mobiledoc', 'plaintext'),
)
tags = ghost.tags.list(fields='name', limit='all')
users = ghost.users.list(include='count.posts')
# use pagination
while posts:
for post in posts:
print(post)
posts = posts.next_page()
print(posts.total)
print(posts.pages)
# update a post & tag
updated_post = ghost.posts.update(post.id, title='Updated title')
updated_tag = ghost.tags.update(tag.id, name='Updated tag')
# note: creating, updating and deleting a user is not allowed by the API
# access fields as properties
print(post.title)
print(post.markdown) # needs formats='mobiledoc'
print(post.author.name) # needs include='author'
# delete a post & tag
ghost.posts.delete(post.id)
ghost.tags.delete(tag.id)
# upload an image
ghost.upload(file_obj=open('sample.png', 'rb'))
ghost.upload(file_path='/path/to/image.jpeg', 'rb')
ghost.upload(name='image.gif', data=open('local.gif', 'rb').read())
# log out
ghost.logout()
The logged in credentials will be saved in memory and
on HTTP 401 errors the client will attempt
to re-authenticate once automatically.
Responses are wrapped in `models.ModelList` and `models.Model`
types to allow pagination and retrieving fields as properties.
"""
DEFAULT_VERSION = '1'
"""
The default version to report when cannot be fetched.
"""
def __init__(
self, base_url, version='auto',
client_id=None, client_secret=None,
access_token=None, refresh_token=None
):
"""
Creates a new Ghost API client.
:param base_url: The base url of the server
:param version: The server version to use (default: `auto`)
:param client_id: Self-supplied client ID (optional)
:param client_secret: Self-supplied client secret (optional)
:param access_token: Self-supplied access token (optional)
:param refresh_token: Self-supplied refresh token (optional)
"""
self.base_url = '%s/ghost/api/v0.1' % base_url
self._version = version
self._client_id = client_id
self._client_secret = client_secret
self._access_token = access_token
self._refresh_token = refresh_token
self._username = None
self._password = None
if not self._client_id or not self._client_secret:
raise GhostException(401, [{
'errorType': 'InternalError',
'message': 'No client_id or client_secret given or found'
}])
self.posts = PostController(self)
self.tags = Controller(self, 'tags')
self.users = Controller(self, 'users')
@classmethod
def from_sqlite(cls, database_path, base_url, version='auto', client_id='ghost-admin'):
"""
Initialize a new Ghost API client,
reading the client ID and secret from the SQlite database.
:param database_path: The path to the database file.
:param base_url: The base url of the server
:param version: The server version to use (default: `auto`)
:param client_id: The client ID to look for in the database
:return: A new Ghost API client instance
"""
import os
import sqlite3
fd = os.open(database_path, os.O_RDONLY)
connection = sqlite3.connect('/dev/fd/%d' % fd)
os.close(fd)
try:
row = connection.execute(
'SELECT secret FROM clients WHERE slug = ?',
(client_id,)
).fetchone()
if row:
return cls(
base_url, version=version,
client_id=client_id, client_secret=row[0]
)
else:
raise GhostException(401, [{
'errorType': 'InternalError',
'message': 'No client_secret found for client_id: %s' % client_id
}])
finally:
connection.close()
@property
def version(self):
"""
:return: The version of the server when initialized as 'auto',
otherwise the version passed in at initialization
"""
if self._version != 'auto':
return self._version
if self._version == 'auto':
try:
data = self.execute_get('configuration/about/')
self._version = data['configuration'][0]['version']
except GhostException:
return self.DEFAULT_VERSION
return self._version
def login(self, username, password):
"""
Authenticate with the server.
:param username: The username of an existing user
:param password: The password for the user
:return: The authentication response from the REST endpoint
"""
data = self._authenticate(
grant_type='password',
username=username,
password=password,
client_id=self._client_id,
client_secret=self._client_secret
)
self._username = username
self._password = password
return data
def refresh_session(self):
"""
Re-authenticate using the refresh token if available.
Otherwise log in using the username and password
if it was used to authenticate initially.
:return: The authentication response or `None` if not available
"""
if not self._refresh_token:
if self._username and self._password:
return self.login(self._username, self._password)
return
return self._authenticate(
grant_type='refresh_token',
refresh_token=self._refresh_token,
client_id=self._client_id,
client_secret=self._client_secret
)
def _authenticate(self, **kwargs):
response = requests.post(
'%s/authentication/token' % self.base_url, data=kwargs
)
if response.status_code != 200:
raise GhostException(response.status_code, response.json().get('errors', []))
data = response.json()
self._access_token = data.get('access_token')
self._refresh_token = data.get('refresh_token')
return data
def revoke_refresh_token(self):
"""
Revoke the refresh token currently active.
"""
if not self._refresh_token:
return
self.execute_post('authentication/revoke', json=dict(
token_type_hint='refresh_token',
token=self._refresh_token
))
self._refresh_token = None
def logout(self):
"""
Log out, revoking the access tokens
and forgetting the login details if they were given.
"""
self.revoke_refresh_token()
self.revoke_access_token()
self._username, self._password = None, None
def upload(self, file_obj=None, file_path=None, name=None, data=None):
"""
Upload an image and return its path on the server.
Either `file_obj` or `file_path` or `name` and `data` has to be specified.
:param file_obj: A file object to upload
:param file_path: A file path to upload from
:param name: A file name for uploading
:param data: The file content to upload
:return: The path of the uploaded file on the server
"""
close = False
if file_obj:
file_name, content = os.path.basename(file_obj.name), file_obj
elif file_path:
file_name, content = os.path.basename(file_path), open(file_path, 'rb')
close = True
elif name and data:
file_name, content = name, data
else:
raise GhostException(
400,
'Either `file_obj` or `file_path` or '
'`name` and `data` needs to be specified'
)
try:
content_type, _ = mimetypes.guess_type(file_name)
file_arg = (file_name, content, content_type)
response = self.execute_post('uploads/', files={'uploadimage': file_arg})
return response
finally:
if close:
content.close()
@refresh_session_if_necessary
def execute_get(self, resource, **kwargs):
"""
Execute an HTTP GET request against the API endpoints.
This method is meant for internal use.
:param resource: The last part of the URI
:param kwargs: Additional query parameters (and optionally headers)
:return: The HTTP response as JSON or `GhostException` if unsuccessful
"""
url = '%s/%s' % (self.base_url, resource)
headers = kwargs.pop('headers', dict())
headers['Accept'] = 'application/json'
headers['Content-Type'] = 'application/json'
if kwargs:
separator = '&' if '?' in url else '?'
for key, value in kwargs.items():
if hasattr(value, '__iter__') and type(value) not in six.string_types:
url = '%s%s%s=%s' % (url, separator, key, ','.join(value))
else:
url = '%s%s%s=%s' % (url, separator, key, value)
separator = '&'
if self._access_token:
headers['Authorization'] = 'Bearer %s' % self._access_token
else:
separator = '&' if '?' in url else '?'
url = '%s%sclient_id=%s&client_secret=%s' % (
url, separator, self._client_id, self._client_secret
)
response = requests.get(url, headers=headers)
if response.status_code // 100 != 2:
raise GhostException(response.status_code, response.json().get('errors', []))
return response.json()
def execute_post(self, resource, **kwargs):
"""
Execute an HTTP POST request against the API endpoints.
This method is meant for internal use.
:param resource: The last part of the URI
:param kwargs: Additional parameters for the HTTP call (`request` library)
:return: The HTTP response as JSON or `GhostException` if unsuccessful
"""
return self._request(resource, requests.post, **kwargs).json()
def execute_put(self, resource, **kwargs):
"""
Execute an HTTP PUT request against the API endpoints.
This method is meant for internal use.
:param resource: The last part of the URI
:param kwargs: Additional parameters for the HTTP call (`request` library)
:return: The HTTP response as JSON or `GhostException` if unsuccessful
"""
return self._request(resource, requests.put, **kwargs).json()
def execute_delete(self, resource, **kwargs):
"""
Execute an HTTP DELETE request against the API endpoints.
This method is meant for internal use.
Does not return anything but raises an exception when failed.
:param resource: The last part of the URI
:param kwargs: Additional parameters for the HTTP call (`request` library)
"""
self._request(resource, requests.delete, **kwargs)
@refresh_session_if_necessary
def _request(self, resource, request, **kwargs):
if not self._access_token:
raise GhostException(401, [{
'errorType': 'ClientError',
'message': 'Access token not found'
}])
url = '%s/%s' % (self.base_url, resource)
headers = kwargs.pop('headers', dict())
if 'json' in kwargs:
headers['Accept'] = 'application/json'
headers['Content-Type'] = 'application/json'
if self._access_token:
headers['Authorization'] = 'Bearer %s' % self._access_token
response = request(url, headers=headers, **kwargs)
if response.status_code // 100 != 2:
raise GhostException(response.status_code, response.json().get('errors', []))
return response
|
rycus86/ghost-client | ghost_client/api.py | Ghost.revoke_refresh_token | python | def revoke_refresh_token(self):
if not self._refresh_token:
return
self.execute_post('authentication/revoke', json=dict(
token_type_hint='refresh_token',
token=self._refresh_token
))
self._refresh_token = None | Revoke the refresh token currently active. | train | https://github.com/rycus86/ghost-client/blob/863d332801d2c1b8e7ad4573c7b16db78a7f8c8d/ghost_client/api.py#L275-L288 | [
"def execute_post(self, resource, **kwargs):\n \"\"\"\n Execute an HTTP POST request against the API endpoints.\n This method is meant for internal use.\n\n :param resource: The last part of the URI\n :param kwargs: Additional parameters for the HTTP call (`request` library)\n :return: The HTTP response as JSON or `GhostException` if unsuccessful\n \"\"\"\n\n return self._request(resource, requests.post, **kwargs).json()\n"
] | class Ghost(object):
"""
API client for the Ghost REST endpoints.
See https://api.ghost.org/ for the available parameters.
Sample usage:
from ghost_client import Ghost
# to read the client ID and secret from the database
ghost = Ghost.from_sqlite(
'/var/lib/ghost/content/data/ghost.db',
'http://localhost:2368'
)
# or to use a specific client ID and secret
ghost = Ghost(
'http://localhost:2368',
client_id='ghost-admin', client_secret='secret_key'
)
# log in
ghost.login('username', 'password')
# print the server's version
print(ghost.version)
# create a new tag
tag = ghost.tags.create(name='API sample')
# create a new post using it
post = ghost.posts.create(
title='Example post', slug='custom-slug',
markdown='', # yes, even on v1.+
custom_excerpt='An example post created from Python',
tags=[tag]
)
# list posts, tags and users
posts = ghost.posts.list(
status='all',
fields=('id', 'title', 'slug'),
formats=('html', 'mobiledoc', 'plaintext'),
)
tags = ghost.tags.list(fields='name', limit='all')
users = ghost.users.list(include='count.posts')
# use pagination
while posts:
for post in posts:
print(post)
posts = posts.next_page()
print(posts.total)
print(posts.pages)
# update a post & tag
updated_post = ghost.posts.update(post.id, title='Updated title')
updated_tag = ghost.tags.update(tag.id, name='Updated tag')
# note: creating, updating and deleting a user is not allowed by the API
# access fields as properties
print(post.title)
print(post.markdown) # needs formats='mobiledoc'
print(post.author.name) # needs include='author'
# delete a post & tag
ghost.posts.delete(post.id)
ghost.tags.delete(tag.id)
# upload an image
ghost.upload(file_obj=open('sample.png', 'rb'))
ghost.upload(file_path='/path/to/image.jpeg', 'rb')
ghost.upload(name='image.gif', data=open('local.gif', 'rb').read())
# log out
ghost.logout()
The logged in credentials will be saved in memory and
on HTTP 401 errors the client will attempt
to re-authenticate once automatically.
Responses are wrapped in `models.ModelList` and `models.Model`
types to allow pagination and retrieving fields as properties.
"""
DEFAULT_VERSION = '1'
"""
The default version to report when cannot be fetched.
"""
def __init__(
self, base_url, version='auto',
client_id=None, client_secret=None,
access_token=None, refresh_token=None
):
"""
Creates a new Ghost API client.
:param base_url: The base url of the server
:param version: The server version to use (default: `auto`)
:param client_id: Self-supplied client ID (optional)
:param client_secret: Self-supplied client secret (optional)
:param access_token: Self-supplied access token (optional)
:param refresh_token: Self-supplied refresh token (optional)
"""
self.base_url = '%s/ghost/api/v0.1' % base_url
self._version = version
self._client_id = client_id
self._client_secret = client_secret
self._access_token = access_token
self._refresh_token = refresh_token
self._username = None
self._password = None
if not self._client_id or not self._client_secret:
raise GhostException(401, [{
'errorType': 'InternalError',
'message': 'No client_id or client_secret given or found'
}])
self.posts = PostController(self)
self.tags = Controller(self, 'tags')
self.users = Controller(self, 'users')
@classmethod
def from_sqlite(cls, database_path, base_url, version='auto', client_id='ghost-admin'):
"""
Initialize a new Ghost API client,
reading the client ID and secret from the SQlite database.
:param database_path: The path to the database file.
:param base_url: The base url of the server
:param version: The server version to use (default: `auto`)
:param client_id: The client ID to look for in the database
:return: A new Ghost API client instance
"""
import os
import sqlite3
fd = os.open(database_path, os.O_RDONLY)
connection = sqlite3.connect('/dev/fd/%d' % fd)
os.close(fd)
try:
row = connection.execute(
'SELECT secret FROM clients WHERE slug = ?',
(client_id,)
).fetchone()
if row:
return cls(
base_url, version=version,
client_id=client_id, client_secret=row[0]
)
else:
raise GhostException(401, [{
'errorType': 'InternalError',
'message': 'No client_secret found for client_id: %s' % client_id
}])
finally:
connection.close()
@property
def version(self):
"""
:return: The version of the server when initialized as 'auto',
otherwise the version passed in at initialization
"""
if self._version != 'auto':
return self._version
if self._version == 'auto':
try:
data = self.execute_get('configuration/about/')
self._version = data['configuration'][0]['version']
except GhostException:
return self.DEFAULT_VERSION
return self._version
def login(self, username, password):
"""
Authenticate with the server.
:param username: The username of an existing user
:param password: The password for the user
:return: The authentication response from the REST endpoint
"""
data = self._authenticate(
grant_type='password',
username=username,
password=password,
client_id=self._client_id,
client_secret=self._client_secret
)
self._username = username
self._password = password
return data
def refresh_session(self):
"""
Re-authenticate using the refresh token if available.
Otherwise log in using the username and password
if it was used to authenticate initially.
:return: The authentication response or `None` if not available
"""
if not self._refresh_token:
if self._username and self._password:
return self.login(self._username, self._password)
return
return self._authenticate(
grant_type='refresh_token',
refresh_token=self._refresh_token,
client_id=self._client_id,
client_secret=self._client_secret
)
def _authenticate(self, **kwargs):
response = requests.post(
'%s/authentication/token' % self.base_url, data=kwargs
)
if response.status_code != 200:
raise GhostException(response.status_code, response.json().get('errors', []))
data = response.json()
self._access_token = data.get('access_token')
self._refresh_token = data.get('refresh_token')
return data
def revoke_access_token(self):
"""
Revoke the access token currently in use.
"""
if not self._access_token:
return
self.execute_post('authentication/revoke', json=dict(
token_type_hint='access_token',
token=self._access_token
))
self._access_token = None
def logout(self):
"""
Log out, revoking the access tokens
and forgetting the login details if they were given.
"""
self.revoke_refresh_token()
self.revoke_access_token()
self._username, self._password = None, None
def upload(self, file_obj=None, file_path=None, name=None, data=None):
"""
Upload an image and return its path on the server.
Either `file_obj` or `file_path` or `name` and `data` has to be specified.
:param file_obj: A file object to upload
:param file_path: A file path to upload from
:param name: A file name for uploading
:param data: The file content to upload
:return: The path of the uploaded file on the server
"""
close = False
if file_obj:
file_name, content = os.path.basename(file_obj.name), file_obj
elif file_path:
file_name, content = os.path.basename(file_path), open(file_path, 'rb')
close = True
elif name and data:
file_name, content = name, data
else:
raise GhostException(
400,
'Either `file_obj` or `file_path` or '
'`name` and `data` needs to be specified'
)
try:
content_type, _ = mimetypes.guess_type(file_name)
file_arg = (file_name, content, content_type)
response = self.execute_post('uploads/', files={'uploadimage': file_arg})
return response
finally:
if close:
content.close()
@refresh_session_if_necessary
def execute_get(self, resource, **kwargs):
"""
Execute an HTTP GET request against the API endpoints.
This method is meant for internal use.
:param resource: The last part of the URI
:param kwargs: Additional query parameters (and optionally headers)
:return: The HTTP response as JSON or `GhostException` if unsuccessful
"""
url = '%s/%s' % (self.base_url, resource)
headers = kwargs.pop('headers', dict())
headers['Accept'] = 'application/json'
headers['Content-Type'] = 'application/json'
if kwargs:
separator = '&' if '?' in url else '?'
for key, value in kwargs.items():
if hasattr(value, '__iter__') and type(value) not in six.string_types:
url = '%s%s%s=%s' % (url, separator, key, ','.join(value))
else:
url = '%s%s%s=%s' % (url, separator, key, value)
separator = '&'
if self._access_token:
headers['Authorization'] = 'Bearer %s' % self._access_token
else:
separator = '&' if '?' in url else '?'
url = '%s%sclient_id=%s&client_secret=%s' % (
url, separator, self._client_id, self._client_secret
)
response = requests.get(url, headers=headers)
if response.status_code // 100 != 2:
raise GhostException(response.status_code, response.json().get('errors', []))
return response.json()
def execute_post(self, resource, **kwargs):
"""
Execute an HTTP POST request against the API endpoints.
This method is meant for internal use.
:param resource: The last part of the URI
:param kwargs: Additional parameters for the HTTP call (`request` library)
:return: The HTTP response as JSON or `GhostException` if unsuccessful
"""
return self._request(resource, requests.post, **kwargs).json()
def execute_put(self, resource, **kwargs):
"""
Execute an HTTP PUT request against the API endpoints.
This method is meant for internal use.
:param resource: The last part of the URI
:param kwargs: Additional parameters for the HTTP call (`request` library)
:return: The HTTP response as JSON or `GhostException` if unsuccessful
"""
return self._request(resource, requests.put, **kwargs).json()
def execute_delete(self, resource, **kwargs):
"""
Execute an HTTP DELETE request against the API endpoints.
This method is meant for internal use.
Does not return anything but raises an exception when failed.
:param resource: The last part of the URI
:param kwargs: Additional parameters for the HTTP call (`request` library)
"""
self._request(resource, requests.delete, **kwargs)
@refresh_session_if_necessary
def _request(self, resource, request, **kwargs):
if not self._access_token:
raise GhostException(401, [{
'errorType': 'ClientError',
'message': 'Access token not found'
}])
url = '%s/%s' % (self.base_url, resource)
headers = kwargs.pop('headers', dict())
if 'json' in kwargs:
headers['Accept'] = 'application/json'
headers['Content-Type'] = 'application/json'
if self._access_token:
headers['Authorization'] = 'Bearer %s' % self._access_token
response = request(url, headers=headers, **kwargs)
if response.status_code // 100 != 2:
raise GhostException(response.status_code, response.json().get('errors', []))
return response
|
rycus86/ghost-client | ghost_client/api.py | Ghost.logout | python | def logout(self):
self.revoke_refresh_token()
self.revoke_access_token()
self._username, self._password = None, None | Log out, revoking the access tokens
and forgetting the login details if they were given. | train | https://github.com/rycus86/ghost-client/blob/863d332801d2c1b8e7ad4573c7b16db78a7f8c8d/ghost_client/api.py#L290-L299 | [
"def revoke_access_token(self):\n \"\"\"\n Revoke the access token currently in use.\n \"\"\"\n\n if not self._access_token:\n return\n\n self.execute_post('authentication/revoke', json=dict(\n token_type_hint='access_token',\n token=self._access_token\n ))\n\n self._access_token = None\n",
"def revoke_refresh_token(self):\n \"\"\"\n Revoke the refresh token currently active.\n \"\"\"\n\n if not self._refresh_token:\n return\n\n self.execute_post('authentication/revoke', json=dict(\n token_type_hint='refresh_token',\n token=self._refresh_token\n ))\n\n self._refresh_token = None\n"
] | class Ghost(object):
"""
API client for the Ghost REST endpoints.
See https://api.ghost.org/ for the available parameters.
Sample usage:
from ghost_client import Ghost
# to read the client ID and secret from the database
ghost = Ghost.from_sqlite(
'/var/lib/ghost/content/data/ghost.db',
'http://localhost:2368'
)
# or to use a specific client ID and secret
ghost = Ghost(
'http://localhost:2368',
client_id='ghost-admin', client_secret='secret_key'
)
# log in
ghost.login('username', 'password')
# print the server's version
print(ghost.version)
# create a new tag
tag = ghost.tags.create(name='API sample')
# create a new post using it
post = ghost.posts.create(
title='Example post', slug='custom-slug',
markdown='', # yes, even on v1.+
custom_excerpt='An example post created from Python',
tags=[tag]
)
# list posts, tags and users
posts = ghost.posts.list(
status='all',
fields=('id', 'title', 'slug'),
formats=('html', 'mobiledoc', 'plaintext'),
)
tags = ghost.tags.list(fields='name', limit='all')
users = ghost.users.list(include='count.posts')
# use pagination
while posts:
for post in posts:
print(post)
posts = posts.next_page()
print(posts.total)
print(posts.pages)
# update a post & tag
updated_post = ghost.posts.update(post.id, title='Updated title')
updated_tag = ghost.tags.update(tag.id, name='Updated tag')
# note: creating, updating and deleting a user is not allowed by the API
# access fields as properties
print(post.title)
print(post.markdown) # needs formats='mobiledoc'
print(post.author.name) # needs include='author'
# delete a post & tag
ghost.posts.delete(post.id)
ghost.tags.delete(tag.id)
# upload an image
ghost.upload(file_obj=open('sample.png', 'rb'))
ghost.upload(file_path='/path/to/image.jpeg', 'rb')
ghost.upload(name='image.gif', data=open('local.gif', 'rb').read())
# log out
ghost.logout()
The logged in credentials will be saved in memory and
on HTTP 401 errors the client will attempt
to re-authenticate once automatically.
Responses are wrapped in `models.ModelList` and `models.Model`
types to allow pagination and retrieving fields as properties.
"""
DEFAULT_VERSION = '1'
"""
The default version to report when cannot be fetched.
"""
def __init__(
self, base_url, version='auto',
client_id=None, client_secret=None,
access_token=None, refresh_token=None
):
"""
Creates a new Ghost API client.
:param base_url: The base url of the server
:param version: The server version to use (default: `auto`)
:param client_id: Self-supplied client ID (optional)
:param client_secret: Self-supplied client secret (optional)
:param access_token: Self-supplied access token (optional)
:param refresh_token: Self-supplied refresh token (optional)
"""
self.base_url = '%s/ghost/api/v0.1' % base_url
self._version = version
self._client_id = client_id
self._client_secret = client_secret
self._access_token = access_token
self._refresh_token = refresh_token
self._username = None
self._password = None
if not self._client_id or not self._client_secret:
raise GhostException(401, [{
'errorType': 'InternalError',
'message': 'No client_id or client_secret given or found'
}])
self.posts = PostController(self)
self.tags = Controller(self, 'tags')
self.users = Controller(self, 'users')
@classmethod
def from_sqlite(cls, database_path, base_url, version='auto', client_id='ghost-admin'):
"""
Initialize a new Ghost API client,
reading the client ID and secret from the SQlite database.
:param database_path: The path to the database file.
:param base_url: The base url of the server
:param version: The server version to use (default: `auto`)
:param client_id: The client ID to look for in the database
:return: A new Ghost API client instance
"""
import os
import sqlite3
fd = os.open(database_path, os.O_RDONLY)
connection = sqlite3.connect('/dev/fd/%d' % fd)
os.close(fd)
try:
row = connection.execute(
'SELECT secret FROM clients WHERE slug = ?',
(client_id,)
).fetchone()
if row:
return cls(
base_url, version=version,
client_id=client_id, client_secret=row[0]
)
else:
raise GhostException(401, [{
'errorType': 'InternalError',
'message': 'No client_secret found for client_id: %s' % client_id
}])
finally:
connection.close()
@property
def version(self):
"""
:return: The version of the server when initialized as 'auto',
otherwise the version passed in at initialization
"""
if self._version != 'auto':
return self._version
if self._version == 'auto':
try:
data = self.execute_get('configuration/about/')
self._version = data['configuration'][0]['version']
except GhostException:
return self.DEFAULT_VERSION
return self._version
def login(self, username, password):
"""
Authenticate with the server.
:param username: The username of an existing user
:param password: The password for the user
:return: The authentication response from the REST endpoint
"""
data = self._authenticate(
grant_type='password',
username=username,
password=password,
client_id=self._client_id,
client_secret=self._client_secret
)
self._username = username
self._password = password
return data
def refresh_session(self):
"""
Re-authenticate using the refresh token if available.
Otherwise log in using the username and password
if it was used to authenticate initially.
:return: The authentication response or `None` if not available
"""
if not self._refresh_token:
if self._username and self._password:
return self.login(self._username, self._password)
return
return self._authenticate(
grant_type='refresh_token',
refresh_token=self._refresh_token,
client_id=self._client_id,
client_secret=self._client_secret
)
def _authenticate(self, **kwargs):
response = requests.post(
'%s/authentication/token' % self.base_url, data=kwargs
)
if response.status_code != 200:
raise GhostException(response.status_code, response.json().get('errors', []))
data = response.json()
self._access_token = data.get('access_token')
self._refresh_token = data.get('refresh_token')
return data
def revoke_access_token(self):
"""
Revoke the access token currently in use.
"""
if not self._access_token:
return
self.execute_post('authentication/revoke', json=dict(
token_type_hint='access_token',
token=self._access_token
))
self._access_token = None
def revoke_refresh_token(self):
"""
Revoke the refresh token currently active.
"""
if not self._refresh_token:
return
self.execute_post('authentication/revoke', json=dict(
token_type_hint='refresh_token',
token=self._refresh_token
))
self._refresh_token = None
def upload(self, file_obj=None, file_path=None, name=None, data=None):
"""
Upload an image and return its path on the server.
Either `file_obj` or `file_path` or `name` and `data` has to be specified.
:param file_obj: A file object to upload
:param file_path: A file path to upload from
:param name: A file name for uploading
:param data: The file content to upload
:return: The path of the uploaded file on the server
"""
close = False
if file_obj:
file_name, content = os.path.basename(file_obj.name), file_obj
elif file_path:
file_name, content = os.path.basename(file_path), open(file_path, 'rb')
close = True
elif name and data:
file_name, content = name, data
else:
raise GhostException(
400,
'Either `file_obj` or `file_path` or '
'`name` and `data` needs to be specified'
)
try:
content_type, _ = mimetypes.guess_type(file_name)
file_arg = (file_name, content, content_type)
response = self.execute_post('uploads/', files={'uploadimage': file_arg})
return response
finally:
if close:
content.close()
@refresh_session_if_necessary
def execute_get(self, resource, **kwargs):
"""
Execute an HTTP GET request against the API endpoints.
This method is meant for internal use.
:param resource: The last part of the URI
:param kwargs: Additional query parameters (and optionally headers)
:return: The HTTP response as JSON or `GhostException` if unsuccessful
"""
url = '%s/%s' % (self.base_url, resource)
headers = kwargs.pop('headers', dict())
headers['Accept'] = 'application/json'
headers['Content-Type'] = 'application/json'
if kwargs:
separator = '&' if '?' in url else '?'
for key, value in kwargs.items():
if hasattr(value, '__iter__') and type(value) not in six.string_types:
url = '%s%s%s=%s' % (url, separator, key, ','.join(value))
else:
url = '%s%s%s=%s' % (url, separator, key, value)
separator = '&'
if self._access_token:
headers['Authorization'] = 'Bearer %s' % self._access_token
else:
separator = '&' if '?' in url else '?'
url = '%s%sclient_id=%s&client_secret=%s' % (
url, separator, self._client_id, self._client_secret
)
response = requests.get(url, headers=headers)
if response.status_code // 100 != 2:
raise GhostException(response.status_code, response.json().get('errors', []))
return response.json()
def execute_post(self, resource, **kwargs):
"""
Execute an HTTP POST request against the API endpoints.
This method is meant for internal use.
:param resource: The last part of the URI
:param kwargs: Additional parameters for the HTTP call (`request` library)
:return: The HTTP response as JSON or `GhostException` if unsuccessful
"""
return self._request(resource, requests.post, **kwargs).json()
def execute_put(self, resource, **kwargs):
"""
Execute an HTTP PUT request against the API endpoints.
This method is meant for internal use.
:param resource: The last part of the URI
:param kwargs: Additional parameters for the HTTP call (`request` library)
:return: The HTTP response as JSON or `GhostException` if unsuccessful
"""
return self._request(resource, requests.put, **kwargs).json()
def execute_delete(self, resource, **kwargs):
"""
Execute an HTTP DELETE request against the API endpoints.
This method is meant for internal use.
Does not return anything but raises an exception when failed.
:param resource: The last part of the URI
:param kwargs: Additional parameters for the HTTP call (`request` library)
"""
self._request(resource, requests.delete, **kwargs)
@refresh_session_if_necessary
def _request(self, resource, request, **kwargs):
if not self._access_token:
raise GhostException(401, [{
'errorType': 'ClientError',
'message': 'Access token not found'
}])
url = '%s/%s' % (self.base_url, resource)
headers = kwargs.pop('headers', dict())
if 'json' in kwargs:
headers['Accept'] = 'application/json'
headers['Content-Type'] = 'application/json'
if self._access_token:
headers['Authorization'] = 'Bearer %s' % self._access_token
response = request(url, headers=headers, **kwargs)
if response.status_code // 100 != 2:
raise GhostException(response.status_code, response.json().get('errors', []))
return response
|
rycus86/ghost-client | ghost_client/api.py | Ghost.upload | python | def upload(self, file_obj=None, file_path=None, name=None, data=None):
close = False
if file_obj:
file_name, content = os.path.basename(file_obj.name), file_obj
elif file_path:
file_name, content = os.path.basename(file_path), open(file_path, 'rb')
close = True
elif name and data:
file_name, content = name, data
else:
raise GhostException(
400,
'Either `file_obj` or `file_path` or '
'`name` and `data` needs to be specified'
)
try:
content_type, _ = mimetypes.guess_type(file_name)
file_arg = (file_name, content, content_type)
response = self.execute_post('uploads/', files={'uploadimage': file_arg})
return response
finally:
if close:
content.close() | Upload an image and return its path on the server.
Either `file_obj` or `file_path` or `name` and `data` has to be specified.
:param file_obj: A file object to upload
:param file_path: A file path to upload from
:param name: A file name for uploading
:param data: The file content to upload
:return: The path of the uploaded file on the server | train | https://github.com/rycus86/ghost-client/blob/863d332801d2c1b8e7ad4573c7b16db78a7f8c8d/ghost_client/api.py#L301-L343 | [
"def execute_post(self, resource, **kwargs):\n \"\"\"\n Execute an HTTP POST request against the API endpoints.\n This method is meant for internal use.\n\n :param resource: The last part of the URI\n :param kwargs: Additional parameters for the HTTP call (`request` library)\n :return: The HTTP response as JSON or `GhostException` if unsuccessful\n \"\"\"\n\n return self._request(resource, requests.post, **kwargs).json()\n"
] | class Ghost(object):
"""
API client for the Ghost REST endpoints.
See https://api.ghost.org/ for the available parameters.
Sample usage:
from ghost_client import Ghost
# to read the client ID and secret from the database
ghost = Ghost.from_sqlite(
'/var/lib/ghost/content/data/ghost.db',
'http://localhost:2368'
)
# or to use a specific client ID and secret
ghost = Ghost(
'http://localhost:2368',
client_id='ghost-admin', client_secret='secret_key'
)
# log in
ghost.login('username', 'password')
# print the server's version
print(ghost.version)
# create a new tag
tag = ghost.tags.create(name='API sample')
# create a new post using it
post = ghost.posts.create(
title='Example post', slug='custom-slug',
markdown='', # yes, even on v1.+
custom_excerpt='An example post created from Python',
tags=[tag]
)
# list posts, tags and users
posts = ghost.posts.list(
status='all',
fields=('id', 'title', 'slug'),
formats=('html', 'mobiledoc', 'plaintext'),
)
tags = ghost.tags.list(fields='name', limit='all')
users = ghost.users.list(include='count.posts')
# use pagination
while posts:
for post in posts:
print(post)
posts = posts.next_page()
print(posts.total)
print(posts.pages)
# update a post & tag
updated_post = ghost.posts.update(post.id, title='Updated title')
updated_tag = ghost.tags.update(tag.id, name='Updated tag')
# note: creating, updating and deleting a user is not allowed by the API
# access fields as properties
print(post.title)
print(post.markdown) # needs formats='mobiledoc'
print(post.author.name) # needs include='author'
# delete a post & tag
ghost.posts.delete(post.id)
ghost.tags.delete(tag.id)
# upload an image
ghost.upload(file_obj=open('sample.png', 'rb'))
ghost.upload(file_path='/path/to/image.jpeg', 'rb')
ghost.upload(name='image.gif', data=open('local.gif', 'rb').read())
# log out
ghost.logout()
The logged in credentials will be saved in memory and
on HTTP 401 errors the client will attempt
to re-authenticate once automatically.
Responses are wrapped in `models.ModelList` and `models.Model`
types to allow pagination and retrieving fields as properties.
"""
DEFAULT_VERSION = '1'
"""
The default version to report when cannot be fetched.
"""
def __init__(
self, base_url, version='auto',
client_id=None, client_secret=None,
access_token=None, refresh_token=None
):
"""
Creates a new Ghost API client.
:param base_url: The base url of the server
:param version: The server version to use (default: `auto`)
:param client_id: Self-supplied client ID (optional)
:param client_secret: Self-supplied client secret (optional)
:param access_token: Self-supplied access token (optional)
:param refresh_token: Self-supplied refresh token (optional)
"""
self.base_url = '%s/ghost/api/v0.1' % base_url
self._version = version
self._client_id = client_id
self._client_secret = client_secret
self._access_token = access_token
self._refresh_token = refresh_token
self._username = None
self._password = None
if not self._client_id or not self._client_secret:
raise GhostException(401, [{
'errorType': 'InternalError',
'message': 'No client_id or client_secret given or found'
}])
self.posts = PostController(self)
self.tags = Controller(self, 'tags')
self.users = Controller(self, 'users')
@classmethod
def from_sqlite(cls, database_path, base_url, version='auto', client_id='ghost-admin'):
"""
Initialize a new Ghost API client,
reading the client ID and secret from the SQlite database.
:param database_path: The path to the database file.
:param base_url: The base url of the server
:param version: The server version to use (default: `auto`)
:param client_id: The client ID to look for in the database
:return: A new Ghost API client instance
"""
import os
import sqlite3
fd = os.open(database_path, os.O_RDONLY)
connection = sqlite3.connect('/dev/fd/%d' % fd)
os.close(fd)
try:
row = connection.execute(
'SELECT secret FROM clients WHERE slug = ?',
(client_id,)
).fetchone()
if row:
return cls(
base_url, version=version,
client_id=client_id, client_secret=row[0]
)
else:
raise GhostException(401, [{
'errorType': 'InternalError',
'message': 'No client_secret found for client_id: %s' % client_id
}])
finally:
connection.close()
@property
def version(self):
"""
:return: The version of the server when initialized as 'auto',
otherwise the version passed in at initialization
"""
if self._version != 'auto':
return self._version
if self._version == 'auto':
try:
data = self.execute_get('configuration/about/')
self._version = data['configuration'][0]['version']
except GhostException:
return self.DEFAULT_VERSION
return self._version
def login(self, username, password):
"""
Authenticate with the server.
:param username: The username of an existing user
:param password: The password for the user
:return: The authentication response from the REST endpoint
"""
data = self._authenticate(
grant_type='password',
username=username,
password=password,
client_id=self._client_id,
client_secret=self._client_secret
)
self._username = username
self._password = password
return data
def refresh_session(self):
"""
Re-authenticate using the refresh token if available.
Otherwise log in using the username and password
if it was used to authenticate initially.
:return: The authentication response or `None` if not available
"""
if not self._refresh_token:
if self._username and self._password:
return self.login(self._username, self._password)
return
return self._authenticate(
grant_type='refresh_token',
refresh_token=self._refresh_token,
client_id=self._client_id,
client_secret=self._client_secret
)
def _authenticate(self, **kwargs):
response = requests.post(
'%s/authentication/token' % self.base_url, data=kwargs
)
if response.status_code != 200:
raise GhostException(response.status_code, response.json().get('errors', []))
data = response.json()
self._access_token = data.get('access_token')
self._refresh_token = data.get('refresh_token')
return data
def revoke_access_token(self):
"""
Revoke the access token currently in use.
"""
if not self._access_token:
return
self.execute_post('authentication/revoke', json=dict(
token_type_hint='access_token',
token=self._access_token
))
self._access_token = None
def revoke_refresh_token(self):
"""
Revoke the refresh token currently active.
"""
if not self._refresh_token:
return
self.execute_post('authentication/revoke', json=dict(
token_type_hint='refresh_token',
token=self._refresh_token
))
self._refresh_token = None
def logout(self):
"""
Log out, revoking the access tokens
and forgetting the login details if they were given.
"""
self.revoke_refresh_token()
self.revoke_access_token()
self._username, self._password = None, None
@refresh_session_if_necessary
def execute_get(self, resource, **kwargs):
"""
Execute an HTTP GET request against the API endpoints.
This method is meant for internal use.
:param resource: The last part of the URI
:param kwargs: Additional query parameters (and optionally headers)
:return: The HTTP response as JSON or `GhostException` if unsuccessful
"""
url = '%s/%s' % (self.base_url, resource)
headers = kwargs.pop('headers', dict())
headers['Accept'] = 'application/json'
headers['Content-Type'] = 'application/json'
if kwargs:
separator = '&' if '?' in url else '?'
for key, value in kwargs.items():
if hasattr(value, '__iter__') and type(value) not in six.string_types:
url = '%s%s%s=%s' % (url, separator, key, ','.join(value))
else:
url = '%s%s%s=%s' % (url, separator, key, value)
separator = '&'
if self._access_token:
headers['Authorization'] = 'Bearer %s' % self._access_token
else:
separator = '&' if '?' in url else '?'
url = '%s%sclient_id=%s&client_secret=%s' % (
url, separator, self._client_id, self._client_secret
)
response = requests.get(url, headers=headers)
if response.status_code // 100 != 2:
raise GhostException(response.status_code, response.json().get('errors', []))
return response.json()
def execute_post(self, resource, **kwargs):
"""
Execute an HTTP POST request against the API endpoints.
This method is meant for internal use.
:param resource: The last part of the URI
:param kwargs: Additional parameters for the HTTP call (`request` library)
:return: The HTTP response as JSON or `GhostException` if unsuccessful
"""
return self._request(resource, requests.post, **kwargs).json()
def execute_put(self, resource, **kwargs):
"""
Execute an HTTP PUT request against the API endpoints.
This method is meant for internal use.
:param resource: The last part of the URI
:param kwargs: Additional parameters for the HTTP call (`request` library)
:return: The HTTP response as JSON or `GhostException` if unsuccessful
"""
return self._request(resource, requests.put, **kwargs).json()
def execute_delete(self, resource, **kwargs):
"""
Execute an HTTP DELETE request against the API endpoints.
This method is meant for internal use.
Does not return anything but raises an exception when failed.
:param resource: The last part of the URI
:param kwargs: Additional parameters for the HTTP call (`request` library)
"""
self._request(resource, requests.delete, **kwargs)
@refresh_session_if_necessary
def _request(self, resource, request, **kwargs):
if not self._access_token:
raise GhostException(401, [{
'errorType': 'ClientError',
'message': 'Access token not found'
}])
url = '%s/%s' % (self.base_url, resource)
headers = kwargs.pop('headers', dict())
if 'json' in kwargs:
headers['Accept'] = 'application/json'
headers['Content-Type'] = 'application/json'
if self._access_token:
headers['Authorization'] = 'Bearer %s' % self._access_token
response = request(url, headers=headers, **kwargs)
if response.status_code // 100 != 2:
raise GhostException(response.status_code, response.json().get('errors', []))
return response
|
rycus86/ghost-client | ghost_client/api.py | Ghost.execute_get | python | def execute_get(self, resource, **kwargs):
url = '%s/%s' % (self.base_url, resource)
headers = kwargs.pop('headers', dict())
headers['Accept'] = 'application/json'
headers['Content-Type'] = 'application/json'
if kwargs:
separator = '&' if '?' in url else '?'
for key, value in kwargs.items():
if hasattr(value, '__iter__') and type(value) not in six.string_types:
url = '%s%s%s=%s' % (url, separator, key, ','.join(value))
else:
url = '%s%s%s=%s' % (url, separator, key, value)
separator = '&'
if self._access_token:
headers['Authorization'] = 'Bearer %s' % self._access_token
else:
separator = '&' if '?' in url else '?'
url = '%s%sclient_id=%s&client_secret=%s' % (
url, separator, self._client_id, self._client_secret
)
response = requests.get(url, headers=headers)
if response.status_code // 100 != 2:
raise GhostException(response.status_code, response.json().get('errors', []))
return response.json() | Execute an HTTP GET request against the API endpoints.
This method is meant for internal use.
:param resource: The last part of the URI
:param kwargs: Additional query parameters (and optionally headers)
:return: The HTTP response as JSON or `GhostException` if unsuccessful | train | https://github.com/rycus86/ghost-client/blob/863d332801d2c1b8e7ad4573c7b16db78a7f8c8d/ghost_client/api.py#L346-L389 | null | class Ghost(object):
"""
API client for the Ghost REST endpoints.
See https://api.ghost.org/ for the available parameters.
Sample usage:
from ghost_client import Ghost
# to read the client ID and secret from the database
ghost = Ghost.from_sqlite(
'/var/lib/ghost/content/data/ghost.db',
'http://localhost:2368'
)
# or to use a specific client ID and secret
ghost = Ghost(
'http://localhost:2368',
client_id='ghost-admin', client_secret='secret_key'
)
# log in
ghost.login('username', 'password')
# print the server's version
print(ghost.version)
# create a new tag
tag = ghost.tags.create(name='API sample')
# create a new post using it
post = ghost.posts.create(
title='Example post', slug='custom-slug',
markdown='', # yes, even on v1.+
custom_excerpt='An example post created from Python',
tags=[tag]
)
# list posts, tags and users
posts = ghost.posts.list(
status='all',
fields=('id', 'title', 'slug'),
formats=('html', 'mobiledoc', 'plaintext'),
)
tags = ghost.tags.list(fields='name', limit='all')
users = ghost.users.list(include='count.posts')
# use pagination
while posts:
for post in posts:
print(post)
posts = posts.next_page()
print(posts.total)
print(posts.pages)
# update a post & tag
updated_post = ghost.posts.update(post.id, title='Updated title')
updated_tag = ghost.tags.update(tag.id, name='Updated tag')
# note: creating, updating and deleting a user is not allowed by the API
# access fields as properties
print(post.title)
print(post.markdown) # needs formats='mobiledoc'
print(post.author.name) # needs include='author'
# delete a post & tag
ghost.posts.delete(post.id)
ghost.tags.delete(tag.id)
# upload an image
ghost.upload(file_obj=open('sample.png', 'rb'))
ghost.upload(file_path='/path/to/image.jpeg', 'rb')
ghost.upload(name='image.gif', data=open('local.gif', 'rb').read())
# log out
ghost.logout()
The logged in credentials will be saved in memory and
on HTTP 401 errors the client will attempt
to re-authenticate once automatically.
Responses are wrapped in `models.ModelList` and `models.Model`
types to allow pagination and retrieving fields as properties.
"""
DEFAULT_VERSION = '1'
"""
The default version to report when cannot be fetched.
"""
def __init__(
self, base_url, version='auto',
client_id=None, client_secret=None,
access_token=None, refresh_token=None
):
"""
Creates a new Ghost API client.
:param base_url: The base url of the server
:param version: The server version to use (default: `auto`)
:param client_id: Self-supplied client ID (optional)
:param client_secret: Self-supplied client secret (optional)
:param access_token: Self-supplied access token (optional)
:param refresh_token: Self-supplied refresh token (optional)
"""
self.base_url = '%s/ghost/api/v0.1' % base_url
self._version = version
self._client_id = client_id
self._client_secret = client_secret
self._access_token = access_token
self._refresh_token = refresh_token
self._username = None
self._password = None
if not self._client_id or not self._client_secret:
raise GhostException(401, [{
'errorType': 'InternalError',
'message': 'No client_id or client_secret given or found'
}])
self.posts = PostController(self)
self.tags = Controller(self, 'tags')
self.users = Controller(self, 'users')
@classmethod
def from_sqlite(cls, database_path, base_url, version='auto', client_id='ghost-admin'):
"""
Initialize a new Ghost API client,
reading the client ID and secret from the SQlite database.
:param database_path: The path to the database file.
:param base_url: The base url of the server
:param version: The server version to use (default: `auto`)
:param client_id: The client ID to look for in the database
:return: A new Ghost API client instance
"""
import os
import sqlite3
fd = os.open(database_path, os.O_RDONLY)
connection = sqlite3.connect('/dev/fd/%d' % fd)
os.close(fd)
try:
row = connection.execute(
'SELECT secret FROM clients WHERE slug = ?',
(client_id,)
).fetchone()
if row:
return cls(
base_url, version=version,
client_id=client_id, client_secret=row[0]
)
else:
raise GhostException(401, [{
'errorType': 'InternalError',
'message': 'No client_secret found for client_id: %s' % client_id
}])
finally:
connection.close()
@property
def version(self):
"""
:return: The version of the server when initialized as 'auto',
otherwise the version passed in at initialization
"""
if self._version != 'auto':
return self._version
if self._version == 'auto':
try:
data = self.execute_get('configuration/about/')
self._version = data['configuration'][0]['version']
except GhostException:
return self.DEFAULT_VERSION
return self._version
def login(self, username, password):
"""
Authenticate with the server.
:param username: The username of an existing user
:param password: The password for the user
:return: The authentication response from the REST endpoint
"""
data = self._authenticate(
grant_type='password',
username=username,
password=password,
client_id=self._client_id,
client_secret=self._client_secret
)
self._username = username
self._password = password
return data
def refresh_session(self):
"""
Re-authenticate using the refresh token if available.
Otherwise log in using the username and password
if it was used to authenticate initially.
:return: The authentication response or `None` if not available
"""
if not self._refresh_token:
if self._username and self._password:
return self.login(self._username, self._password)
return
return self._authenticate(
grant_type='refresh_token',
refresh_token=self._refresh_token,
client_id=self._client_id,
client_secret=self._client_secret
)
def _authenticate(self, **kwargs):
response = requests.post(
'%s/authentication/token' % self.base_url, data=kwargs
)
if response.status_code != 200:
raise GhostException(response.status_code, response.json().get('errors', []))
data = response.json()
self._access_token = data.get('access_token')
self._refresh_token = data.get('refresh_token')
return data
def revoke_access_token(self):
"""
Revoke the access token currently in use.
"""
if not self._access_token:
return
self.execute_post('authentication/revoke', json=dict(
token_type_hint='access_token',
token=self._access_token
))
self._access_token = None
def revoke_refresh_token(self):
"""
Revoke the refresh token currently active.
"""
if not self._refresh_token:
return
self.execute_post('authentication/revoke', json=dict(
token_type_hint='refresh_token',
token=self._refresh_token
))
self._refresh_token = None
def logout(self):
"""
Log out, revoking the access tokens
and forgetting the login details if they were given.
"""
self.revoke_refresh_token()
self.revoke_access_token()
self._username, self._password = None, None
def upload(self, file_obj=None, file_path=None, name=None, data=None):
"""
Upload an image and return its path on the server.
Either `file_obj` or `file_path` or `name` and `data` has to be specified.
:param file_obj: A file object to upload
:param file_path: A file path to upload from
:param name: A file name for uploading
:param data: The file content to upload
:return: The path of the uploaded file on the server
"""
close = False
if file_obj:
file_name, content = os.path.basename(file_obj.name), file_obj
elif file_path:
file_name, content = os.path.basename(file_path), open(file_path, 'rb')
close = True
elif name and data:
file_name, content = name, data
else:
raise GhostException(
400,
'Either `file_obj` or `file_path` or '
'`name` and `data` needs to be specified'
)
try:
content_type, _ = mimetypes.guess_type(file_name)
file_arg = (file_name, content, content_type)
response = self.execute_post('uploads/', files={'uploadimage': file_arg})
return response
finally:
if close:
content.close()
@refresh_session_if_necessary
def execute_post(self, resource, **kwargs):
"""
Execute an HTTP POST request against the API endpoints.
This method is meant for internal use.
:param resource: The last part of the URI
:param kwargs: Additional parameters for the HTTP call (`request` library)
:return: The HTTP response as JSON or `GhostException` if unsuccessful
"""
return self._request(resource, requests.post, **kwargs).json()
def execute_put(self, resource, **kwargs):
"""
Execute an HTTP PUT request against the API endpoints.
This method is meant for internal use.
:param resource: The last part of the URI
:param kwargs: Additional parameters for the HTTP call (`request` library)
:return: The HTTP response as JSON or `GhostException` if unsuccessful
"""
return self._request(resource, requests.put, **kwargs).json()
def execute_delete(self, resource, **kwargs):
"""
Execute an HTTP DELETE request against the API endpoints.
This method is meant for internal use.
Does not return anything but raises an exception when failed.
:param resource: The last part of the URI
:param kwargs: Additional parameters for the HTTP call (`request` library)
"""
self._request(resource, requests.delete, **kwargs)
@refresh_session_if_necessary
def _request(self, resource, request, **kwargs):
if not self._access_token:
raise GhostException(401, [{
'errorType': 'ClientError',
'message': 'Access token not found'
}])
url = '%s/%s' % (self.base_url, resource)
headers = kwargs.pop('headers', dict())
if 'json' in kwargs:
headers['Accept'] = 'application/json'
headers['Content-Type'] = 'application/json'
if self._access_token:
headers['Authorization'] = 'Bearer %s' % self._access_token
response = request(url, headers=headers, **kwargs)
if response.status_code // 100 != 2:
raise GhostException(response.status_code, response.json().get('errors', []))
return response
|
rycus86/ghost-client | ghost_client/api.py | Ghost.execute_post | python | def execute_post(self, resource, **kwargs):
return self._request(resource, requests.post, **kwargs).json() | Execute an HTTP POST request against the API endpoints.
This method is meant for internal use.
:param resource: The last part of the URI
:param kwargs: Additional parameters for the HTTP call (`request` library)
:return: The HTTP response as JSON or `GhostException` if unsuccessful | train | https://github.com/rycus86/ghost-client/blob/863d332801d2c1b8e7ad4573c7b16db78a7f8c8d/ghost_client/api.py#L391-L401 | null | class Ghost(object):
"""
API client for the Ghost REST endpoints.
See https://api.ghost.org/ for the available parameters.
Sample usage:
from ghost_client import Ghost
# to read the client ID and secret from the database
ghost = Ghost.from_sqlite(
'/var/lib/ghost/content/data/ghost.db',
'http://localhost:2368'
)
# or to use a specific client ID and secret
ghost = Ghost(
'http://localhost:2368',
client_id='ghost-admin', client_secret='secret_key'
)
# log in
ghost.login('username', 'password')
# print the server's version
print(ghost.version)
# create a new tag
tag = ghost.tags.create(name='API sample')
# create a new post using it
post = ghost.posts.create(
title='Example post', slug='custom-slug',
markdown='', # yes, even on v1.+
custom_excerpt='An example post created from Python',
tags=[tag]
)
# list posts, tags and users
posts = ghost.posts.list(
status='all',
fields=('id', 'title', 'slug'),
formats=('html', 'mobiledoc', 'plaintext'),
)
tags = ghost.tags.list(fields='name', limit='all')
users = ghost.users.list(include='count.posts')
# use pagination
while posts:
for post in posts:
print(post)
posts = posts.next_page()
print(posts.total)
print(posts.pages)
# update a post & tag
updated_post = ghost.posts.update(post.id, title='Updated title')
updated_tag = ghost.tags.update(tag.id, name='Updated tag')
# note: creating, updating and deleting a user is not allowed by the API
# access fields as properties
print(post.title)
print(post.markdown) # needs formats='mobiledoc'
print(post.author.name) # needs include='author'
# delete a post & tag
ghost.posts.delete(post.id)
ghost.tags.delete(tag.id)
# upload an image
ghost.upload(file_obj=open('sample.png', 'rb'))
ghost.upload(file_path='/path/to/image.jpeg', 'rb')
ghost.upload(name='image.gif', data=open('local.gif', 'rb').read())
# log out
ghost.logout()
The logged in credentials will be saved in memory and
on HTTP 401 errors the client will attempt
to re-authenticate once automatically.
Responses are wrapped in `models.ModelList` and `models.Model`
types to allow pagination and retrieving fields as properties.
"""
DEFAULT_VERSION = '1'
"""
The default version to report when cannot be fetched.
"""
def __init__(
self, base_url, version='auto',
client_id=None, client_secret=None,
access_token=None, refresh_token=None
):
"""
Creates a new Ghost API client.
:param base_url: The base url of the server
:param version: The server version to use (default: `auto`)
:param client_id: Self-supplied client ID (optional)
:param client_secret: Self-supplied client secret (optional)
:param access_token: Self-supplied access token (optional)
:param refresh_token: Self-supplied refresh token (optional)
"""
self.base_url = '%s/ghost/api/v0.1' % base_url
self._version = version
self._client_id = client_id
self._client_secret = client_secret
self._access_token = access_token
self._refresh_token = refresh_token
self._username = None
self._password = None
if not self._client_id or not self._client_secret:
raise GhostException(401, [{
'errorType': 'InternalError',
'message': 'No client_id or client_secret given or found'
}])
self.posts = PostController(self)
self.tags = Controller(self, 'tags')
self.users = Controller(self, 'users')
@classmethod
def from_sqlite(cls, database_path, base_url, version='auto', client_id='ghost-admin'):
"""
Initialize a new Ghost API client,
reading the client ID and secret from the SQlite database.
:param database_path: The path to the database file.
:param base_url: The base url of the server
:param version: The server version to use (default: `auto`)
:param client_id: The client ID to look for in the database
:return: A new Ghost API client instance
"""
import os
import sqlite3
fd = os.open(database_path, os.O_RDONLY)
connection = sqlite3.connect('/dev/fd/%d' % fd)
os.close(fd)
try:
row = connection.execute(
'SELECT secret FROM clients WHERE slug = ?',
(client_id,)
).fetchone()
if row:
return cls(
base_url, version=version,
client_id=client_id, client_secret=row[0]
)
else:
raise GhostException(401, [{
'errorType': 'InternalError',
'message': 'No client_secret found for client_id: %s' % client_id
}])
finally:
connection.close()
@property
def version(self):
"""
:return: The version of the server when initialized as 'auto',
otherwise the version passed in at initialization
"""
if self._version != 'auto':
return self._version
if self._version == 'auto':
try:
data = self.execute_get('configuration/about/')
self._version = data['configuration'][0]['version']
except GhostException:
return self.DEFAULT_VERSION
return self._version
def login(self, username, password):
"""
Authenticate with the server.
:param username: The username of an existing user
:param password: The password for the user
:return: The authentication response from the REST endpoint
"""
data = self._authenticate(
grant_type='password',
username=username,
password=password,
client_id=self._client_id,
client_secret=self._client_secret
)
self._username = username
self._password = password
return data
def refresh_session(self):
"""
Re-authenticate using the refresh token if available.
Otherwise log in using the username and password
if it was used to authenticate initially.
:return: The authentication response or `None` if not available
"""
if not self._refresh_token:
if self._username and self._password:
return self.login(self._username, self._password)
return
return self._authenticate(
grant_type='refresh_token',
refresh_token=self._refresh_token,
client_id=self._client_id,
client_secret=self._client_secret
)
def _authenticate(self, **kwargs):
response = requests.post(
'%s/authentication/token' % self.base_url, data=kwargs
)
if response.status_code != 200:
raise GhostException(response.status_code, response.json().get('errors', []))
data = response.json()
self._access_token = data.get('access_token')
self._refresh_token = data.get('refresh_token')
return data
def revoke_access_token(self):
"""
Revoke the access token currently in use.
"""
if not self._access_token:
return
self.execute_post('authentication/revoke', json=dict(
token_type_hint='access_token',
token=self._access_token
))
self._access_token = None
def revoke_refresh_token(self):
"""
Revoke the refresh token currently active.
"""
if not self._refresh_token:
return
self.execute_post('authentication/revoke', json=dict(
token_type_hint='refresh_token',
token=self._refresh_token
))
self._refresh_token = None
def logout(self):
"""
Log out, revoking the access tokens
and forgetting the login details if they were given.
"""
self.revoke_refresh_token()
self.revoke_access_token()
self._username, self._password = None, None
def upload(self, file_obj=None, file_path=None, name=None, data=None):
"""
Upload an image and return its path on the server.
Either `file_obj` or `file_path` or `name` and `data` has to be specified.
:param file_obj: A file object to upload
:param file_path: A file path to upload from
:param name: A file name for uploading
:param data: The file content to upload
:return: The path of the uploaded file on the server
"""
close = False
if file_obj:
file_name, content = os.path.basename(file_obj.name), file_obj
elif file_path:
file_name, content = os.path.basename(file_path), open(file_path, 'rb')
close = True
elif name and data:
file_name, content = name, data
else:
raise GhostException(
400,
'Either `file_obj` or `file_path` or '
'`name` and `data` needs to be specified'
)
try:
content_type, _ = mimetypes.guess_type(file_name)
file_arg = (file_name, content, content_type)
response = self.execute_post('uploads/', files={'uploadimage': file_arg})
return response
finally:
if close:
content.close()
@refresh_session_if_necessary
def execute_get(self, resource, **kwargs):
"""
Execute an HTTP GET request against the API endpoints.
This method is meant for internal use.
:param resource: The last part of the URI
:param kwargs: Additional query parameters (and optionally headers)
:return: The HTTP response as JSON or `GhostException` if unsuccessful
"""
url = '%s/%s' % (self.base_url, resource)
headers = kwargs.pop('headers', dict())
headers['Accept'] = 'application/json'
headers['Content-Type'] = 'application/json'
if kwargs:
separator = '&' if '?' in url else '?'
for key, value in kwargs.items():
if hasattr(value, '__iter__') and type(value) not in six.string_types:
url = '%s%s%s=%s' % (url, separator, key, ','.join(value))
else:
url = '%s%s%s=%s' % (url, separator, key, value)
separator = '&'
if self._access_token:
headers['Authorization'] = 'Bearer %s' % self._access_token
else:
separator = '&' if '?' in url else '?'
url = '%s%sclient_id=%s&client_secret=%s' % (
url, separator, self._client_id, self._client_secret
)
response = requests.get(url, headers=headers)
if response.status_code // 100 != 2:
raise GhostException(response.status_code, response.json().get('errors', []))
return response.json()
def execute_put(self, resource, **kwargs):
"""
Execute an HTTP PUT request against the API endpoints.
This method is meant for internal use.
:param resource: The last part of the URI
:param kwargs: Additional parameters for the HTTP call (`request` library)
:return: The HTTP response as JSON or `GhostException` if unsuccessful
"""
return self._request(resource, requests.put, **kwargs).json()
def execute_delete(self, resource, **kwargs):
"""
Execute an HTTP DELETE request against the API endpoints.
This method is meant for internal use.
Does not return anything but raises an exception when failed.
:param resource: The last part of the URI
:param kwargs: Additional parameters for the HTTP call (`request` library)
"""
self._request(resource, requests.delete, **kwargs)
@refresh_session_if_necessary
def _request(self, resource, request, **kwargs):
if not self._access_token:
raise GhostException(401, [{
'errorType': 'ClientError',
'message': 'Access token not found'
}])
url = '%s/%s' % (self.base_url, resource)
headers = kwargs.pop('headers', dict())
if 'json' in kwargs:
headers['Accept'] = 'application/json'
headers['Content-Type'] = 'application/json'
if self._access_token:
headers['Authorization'] = 'Bearer %s' % self._access_token
response = request(url, headers=headers, **kwargs)
if response.status_code // 100 != 2:
raise GhostException(response.status_code, response.json().get('errors', []))
return response
|
rycus86/ghost-client | ghost_client/api.py | Ghost.execute_put | python | def execute_put(self, resource, **kwargs):
return self._request(resource, requests.put, **kwargs).json() | Execute an HTTP PUT request against the API endpoints.
This method is meant for internal use.
:param resource: The last part of the URI
:param kwargs: Additional parameters for the HTTP call (`request` library)
:return: The HTTP response as JSON or `GhostException` if unsuccessful | train | https://github.com/rycus86/ghost-client/blob/863d332801d2c1b8e7ad4573c7b16db78a7f8c8d/ghost_client/api.py#L403-L413 | null | class Ghost(object):
"""
API client for the Ghost REST endpoints.
See https://api.ghost.org/ for the available parameters.
Sample usage:
from ghost_client import Ghost
# to read the client ID and secret from the database
ghost = Ghost.from_sqlite(
'/var/lib/ghost/content/data/ghost.db',
'http://localhost:2368'
)
# or to use a specific client ID and secret
ghost = Ghost(
'http://localhost:2368',
client_id='ghost-admin', client_secret='secret_key'
)
# log in
ghost.login('username', 'password')
# print the server's version
print(ghost.version)
# create a new tag
tag = ghost.tags.create(name='API sample')
# create a new post using it
post = ghost.posts.create(
title='Example post', slug='custom-slug',
markdown='', # yes, even on v1.+
custom_excerpt='An example post created from Python',
tags=[tag]
)
# list posts, tags and users
posts = ghost.posts.list(
status='all',
fields=('id', 'title', 'slug'),
formats=('html', 'mobiledoc', 'plaintext'),
)
tags = ghost.tags.list(fields='name', limit='all')
users = ghost.users.list(include='count.posts')
# use pagination
while posts:
for post in posts:
print(post)
posts = posts.next_page()
print(posts.total)
print(posts.pages)
# update a post & tag
updated_post = ghost.posts.update(post.id, title='Updated title')
updated_tag = ghost.tags.update(tag.id, name='Updated tag')
# note: creating, updating and deleting a user is not allowed by the API
# access fields as properties
print(post.title)
print(post.markdown) # needs formats='mobiledoc'
print(post.author.name) # needs include='author'
# delete a post & tag
ghost.posts.delete(post.id)
ghost.tags.delete(tag.id)
# upload an image
ghost.upload(file_obj=open('sample.png', 'rb'))
ghost.upload(file_path='/path/to/image.jpeg', 'rb')
ghost.upload(name='image.gif', data=open('local.gif', 'rb').read())
# log out
ghost.logout()
The logged in credentials will be saved in memory and
on HTTP 401 errors the client will attempt
to re-authenticate once automatically.
Responses are wrapped in `models.ModelList` and `models.Model`
types to allow pagination and retrieving fields as properties.
"""
DEFAULT_VERSION = '1'
"""
The default version to report when cannot be fetched.
"""
def __init__(
self, base_url, version='auto',
client_id=None, client_secret=None,
access_token=None, refresh_token=None
):
"""
Creates a new Ghost API client.
:param base_url: The base url of the server
:param version: The server version to use (default: `auto`)
:param client_id: Self-supplied client ID (optional)
:param client_secret: Self-supplied client secret (optional)
:param access_token: Self-supplied access token (optional)
:param refresh_token: Self-supplied refresh token (optional)
"""
self.base_url = '%s/ghost/api/v0.1' % base_url
self._version = version
self._client_id = client_id
self._client_secret = client_secret
self._access_token = access_token
self._refresh_token = refresh_token
self._username = None
self._password = None
if not self._client_id or not self._client_secret:
raise GhostException(401, [{
'errorType': 'InternalError',
'message': 'No client_id or client_secret given or found'
}])
self.posts = PostController(self)
self.tags = Controller(self, 'tags')
self.users = Controller(self, 'users')
@classmethod
def from_sqlite(cls, database_path, base_url, version='auto', client_id='ghost-admin'):
"""
Initialize a new Ghost API client,
reading the client ID and secret from the SQlite database.
:param database_path: The path to the database file.
:param base_url: The base url of the server
:param version: The server version to use (default: `auto`)
:param client_id: The client ID to look for in the database
:return: A new Ghost API client instance
"""
import os
import sqlite3
fd = os.open(database_path, os.O_RDONLY)
connection = sqlite3.connect('/dev/fd/%d' % fd)
os.close(fd)
try:
row = connection.execute(
'SELECT secret FROM clients WHERE slug = ?',
(client_id,)
).fetchone()
if row:
return cls(
base_url, version=version,
client_id=client_id, client_secret=row[0]
)
else:
raise GhostException(401, [{
'errorType': 'InternalError',
'message': 'No client_secret found for client_id: %s' % client_id
}])
finally:
connection.close()
@property
def version(self):
"""
:return: The version of the server when initialized as 'auto',
otherwise the version passed in at initialization
"""
if self._version != 'auto':
return self._version
if self._version == 'auto':
try:
data = self.execute_get('configuration/about/')
self._version = data['configuration'][0]['version']
except GhostException:
return self.DEFAULT_VERSION
return self._version
def login(self, username, password):
"""
Authenticate with the server.
:param username: The username of an existing user
:param password: The password for the user
:return: The authentication response from the REST endpoint
"""
data = self._authenticate(
grant_type='password',
username=username,
password=password,
client_id=self._client_id,
client_secret=self._client_secret
)
self._username = username
self._password = password
return data
def refresh_session(self):
"""
Re-authenticate using the refresh token if available.
Otherwise log in using the username and password
if it was used to authenticate initially.
:return: The authentication response or `None` if not available
"""
if not self._refresh_token:
if self._username and self._password:
return self.login(self._username, self._password)
return
return self._authenticate(
grant_type='refresh_token',
refresh_token=self._refresh_token,
client_id=self._client_id,
client_secret=self._client_secret
)
def _authenticate(self, **kwargs):
response = requests.post(
'%s/authentication/token' % self.base_url, data=kwargs
)
if response.status_code != 200:
raise GhostException(response.status_code, response.json().get('errors', []))
data = response.json()
self._access_token = data.get('access_token')
self._refresh_token = data.get('refresh_token')
return data
def revoke_access_token(self):
"""
Revoke the access token currently in use.
"""
if not self._access_token:
return
self.execute_post('authentication/revoke', json=dict(
token_type_hint='access_token',
token=self._access_token
))
self._access_token = None
def revoke_refresh_token(self):
"""
Revoke the refresh token currently active.
"""
if not self._refresh_token:
return
self.execute_post('authentication/revoke', json=dict(
token_type_hint='refresh_token',
token=self._refresh_token
))
self._refresh_token = None
def logout(self):
"""
Log out, revoking the access tokens
and forgetting the login details if they were given.
"""
self.revoke_refresh_token()
self.revoke_access_token()
self._username, self._password = None, None
def upload(self, file_obj=None, file_path=None, name=None, data=None):
"""
Upload an image and return its path on the server.
Either `file_obj` or `file_path` or `name` and `data` has to be specified.
:param file_obj: A file object to upload
:param file_path: A file path to upload from
:param name: A file name for uploading
:param data: The file content to upload
:return: The path of the uploaded file on the server
"""
close = False
if file_obj:
file_name, content = os.path.basename(file_obj.name), file_obj
elif file_path:
file_name, content = os.path.basename(file_path), open(file_path, 'rb')
close = True
elif name and data:
file_name, content = name, data
else:
raise GhostException(
400,
'Either `file_obj` or `file_path` or '
'`name` and `data` needs to be specified'
)
try:
content_type, _ = mimetypes.guess_type(file_name)
file_arg = (file_name, content, content_type)
response = self.execute_post('uploads/', files={'uploadimage': file_arg})
return response
finally:
if close:
content.close()
@refresh_session_if_necessary
def execute_get(self, resource, **kwargs):
"""
Execute an HTTP GET request against the API endpoints.
This method is meant for internal use.
:param resource: The last part of the URI
:param kwargs: Additional query parameters (and optionally headers)
:return: The HTTP response as JSON or `GhostException` if unsuccessful
"""
url = '%s/%s' % (self.base_url, resource)
headers = kwargs.pop('headers', dict())
headers['Accept'] = 'application/json'
headers['Content-Type'] = 'application/json'
if kwargs:
separator = '&' if '?' in url else '?'
for key, value in kwargs.items():
if hasattr(value, '__iter__') and type(value) not in six.string_types:
url = '%s%s%s=%s' % (url, separator, key, ','.join(value))
else:
url = '%s%s%s=%s' % (url, separator, key, value)
separator = '&'
if self._access_token:
headers['Authorization'] = 'Bearer %s' % self._access_token
else:
separator = '&' if '?' in url else '?'
url = '%s%sclient_id=%s&client_secret=%s' % (
url, separator, self._client_id, self._client_secret
)
response = requests.get(url, headers=headers)
if response.status_code // 100 != 2:
raise GhostException(response.status_code, response.json().get('errors', []))
return response.json()
def execute_post(self, resource, **kwargs):
"""
Execute an HTTP POST request against the API endpoints.
This method is meant for internal use.
:param resource: The last part of the URI
:param kwargs: Additional parameters for the HTTP call (`request` library)
:return: The HTTP response as JSON or `GhostException` if unsuccessful
"""
return self._request(resource, requests.post, **kwargs).json()
def execute_delete(self, resource, **kwargs):
"""
Execute an HTTP DELETE request against the API endpoints.
This method is meant for internal use.
Does not return anything but raises an exception when failed.
:param resource: The last part of the URI
:param kwargs: Additional parameters for the HTTP call (`request` library)
"""
self._request(resource, requests.delete, **kwargs)
@refresh_session_if_necessary
def _request(self, resource, request, **kwargs):
if not self._access_token:
raise GhostException(401, [{
'errorType': 'ClientError',
'message': 'Access token not found'
}])
url = '%s/%s' % (self.base_url, resource)
headers = kwargs.pop('headers', dict())
if 'json' in kwargs:
headers['Accept'] = 'application/json'
headers['Content-Type'] = 'application/json'
if self._access_token:
headers['Authorization'] = 'Bearer %s' % self._access_token
response = request(url, headers=headers, **kwargs)
if response.status_code // 100 != 2:
raise GhostException(response.status_code, response.json().get('errors', []))
return response
|
rycus86/ghost-client | ghost_client/api.py | Ghost.execute_delete | python | def execute_delete(self, resource, **kwargs):
self._request(resource, requests.delete, **kwargs) | Execute an HTTP DELETE request against the API endpoints.
This method is meant for internal use.
Does not return anything but raises an exception when failed.
:param resource: The last part of the URI
:param kwargs: Additional parameters for the HTTP call (`request` library) | train | https://github.com/rycus86/ghost-client/blob/863d332801d2c1b8e7ad4573c7b16db78a7f8c8d/ghost_client/api.py#L415-L425 | null | class Ghost(object):
"""
API client for the Ghost REST endpoints.
See https://api.ghost.org/ for the available parameters.
Sample usage:
from ghost_client import Ghost
# to read the client ID and secret from the database
ghost = Ghost.from_sqlite(
'/var/lib/ghost/content/data/ghost.db',
'http://localhost:2368'
)
# or to use a specific client ID and secret
ghost = Ghost(
'http://localhost:2368',
client_id='ghost-admin', client_secret='secret_key'
)
# log in
ghost.login('username', 'password')
# print the server's version
print(ghost.version)
# create a new tag
tag = ghost.tags.create(name='API sample')
# create a new post using it
post = ghost.posts.create(
title='Example post', slug='custom-slug',
markdown='', # yes, even on v1.+
custom_excerpt='An example post created from Python',
tags=[tag]
)
# list posts, tags and users
posts = ghost.posts.list(
status='all',
fields=('id', 'title', 'slug'),
formats=('html', 'mobiledoc', 'plaintext'),
)
tags = ghost.tags.list(fields='name', limit='all')
users = ghost.users.list(include='count.posts')
# use pagination
while posts:
for post in posts:
print(post)
posts = posts.next_page()
print(posts.total)
print(posts.pages)
# update a post & tag
updated_post = ghost.posts.update(post.id, title='Updated title')
updated_tag = ghost.tags.update(tag.id, name='Updated tag')
# note: creating, updating and deleting a user is not allowed by the API
# access fields as properties
print(post.title)
print(post.markdown) # needs formats='mobiledoc'
print(post.author.name) # needs include='author'
# delete a post & tag
ghost.posts.delete(post.id)
ghost.tags.delete(tag.id)
# upload an image
ghost.upload(file_obj=open('sample.png', 'rb'))
ghost.upload(file_path='/path/to/image.jpeg', 'rb')
ghost.upload(name='image.gif', data=open('local.gif', 'rb').read())
# log out
ghost.logout()
The logged in credentials will be saved in memory and
on HTTP 401 errors the client will attempt
to re-authenticate once automatically.
Responses are wrapped in `models.ModelList` and `models.Model`
types to allow pagination and retrieving fields as properties.
"""
DEFAULT_VERSION = '1'
"""
The default version to report when cannot be fetched.
"""
def __init__(
self, base_url, version='auto',
client_id=None, client_secret=None,
access_token=None, refresh_token=None
):
"""
Creates a new Ghost API client.
:param base_url: The base url of the server
:param version: The server version to use (default: `auto`)
:param client_id: Self-supplied client ID (optional)
:param client_secret: Self-supplied client secret (optional)
:param access_token: Self-supplied access token (optional)
:param refresh_token: Self-supplied refresh token (optional)
"""
self.base_url = '%s/ghost/api/v0.1' % base_url
self._version = version
self._client_id = client_id
self._client_secret = client_secret
self._access_token = access_token
self._refresh_token = refresh_token
self._username = None
self._password = None
if not self._client_id or not self._client_secret:
raise GhostException(401, [{
'errorType': 'InternalError',
'message': 'No client_id or client_secret given or found'
}])
self.posts = PostController(self)
self.tags = Controller(self, 'tags')
self.users = Controller(self, 'users')
    @classmethod
    def from_sqlite(cls, database_path, base_url, version='auto', client_id='ghost-admin'):
        """
        Initialize a new Ghost API client,
        reading the client ID and secret from the SQlite database.
        :param database_path: The path to the database file.
        :param base_url: The base url of the server
        :param version: The server version to use (default: `auto`)
        :param client_id: The client ID to look for in the database
        :return: A new Ghost API client instance
        """
        import os
        import sqlite3
        # Open the database read-only by connecting through the process'
        # file-descriptor table (/dev/fd).
        # NOTE(review): this is POSIX-specific and presumably will not work
        # on Windows -- confirm before relying on it there.
        fd = os.open(database_path, os.O_RDONLY)
        connection = sqlite3.connect('/dev/fd/%d' % fd)
        # sqlite holds its own descriptor after connect, so ours can go.
        os.close(fd)
        try:
            # Ghost stores API client credentials in the `clients` table.
            row = connection.execute(
                'SELECT secret FROM clients WHERE slug = ?',
                (client_id,)
            ).fetchone()
            if row:
                return cls(
                    base_url, version=version,
                    client_id=client_id, client_secret=row[0]
                )
            else:
                raise GhostException(401, [{
                    'errorType': 'InternalError',
                    'message': 'No client_secret found for client_id: %s' % client_id
                }])
        finally:
            connection.close()
@property
def version(self):
"""
:return: The version of the server when initialized as 'auto',
otherwise the version passed in at initialization
"""
if self._version != 'auto':
return self._version
if self._version == 'auto':
try:
data = self.execute_get('configuration/about/')
self._version = data['configuration'][0]['version']
except GhostException:
return self.DEFAULT_VERSION
return self._version
def login(self, username, password):
"""
Authenticate with the server.
:param username: The username of an existing user
:param password: The password for the user
:return: The authentication response from the REST endpoint
"""
data = self._authenticate(
grant_type='password',
username=username,
password=password,
client_id=self._client_id,
client_secret=self._client_secret
)
self._username = username
self._password = password
return data
def refresh_session(self):
"""
Re-authenticate using the refresh token if available.
Otherwise log in using the username and password
if it was used to authenticate initially.
:return: The authentication response or `None` if not available
"""
if not self._refresh_token:
if self._username and self._password:
return self.login(self._username, self._password)
return
return self._authenticate(
grant_type='refresh_token',
refresh_token=self._refresh_token,
client_id=self._client_id,
client_secret=self._client_secret
)
def _authenticate(self, **kwargs):
response = requests.post(
'%s/authentication/token' % self.base_url, data=kwargs
)
if response.status_code != 200:
raise GhostException(response.status_code, response.json().get('errors', []))
data = response.json()
self._access_token = data.get('access_token')
self._refresh_token = data.get('refresh_token')
return data
def revoke_access_token(self):
"""
Revoke the access token currently in use.
"""
if not self._access_token:
return
self.execute_post('authentication/revoke', json=dict(
token_type_hint='access_token',
token=self._access_token
))
self._access_token = None
def revoke_refresh_token(self):
"""
Revoke the refresh token currently active.
"""
if not self._refresh_token:
return
self.execute_post('authentication/revoke', json=dict(
token_type_hint='refresh_token',
token=self._refresh_token
))
self._refresh_token = None
def logout(self):
"""
Log out, revoking the access tokens
and forgetting the login details if they were given.
"""
self.revoke_refresh_token()
self.revoke_access_token()
self._username, self._password = None, None
def upload(self, file_obj=None, file_path=None, name=None, data=None):
"""
Upload an image and return its path on the server.
Either `file_obj` or `file_path` or `name` and `data` has to be specified.
:param file_obj: A file object to upload
:param file_path: A file path to upload from
:param name: A file name for uploading
:param data: The file content to upload
:return: The path of the uploaded file on the server
"""
close = False
if file_obj:
file_name, content = os.path.basename(file_obj.name), file_obj
elif file_path:
file_name, content = os.path.basename(file_path), open(file_path, 'rb')
close = True
elif name and data:
file_name, content = name, data
else:
raise GhostException(
400,
'Either `file_obj` or `file_path` or '
'`name` and `data` needs to be specified'
)
try:
content_type, _ = mimetypes.guess_type(file_name)
file_arg = (file_name, content, content_type)
response = self.execute_post('uploads/', files={'uploadimage': file_arg})
return response
finally:
if close:
content.close()
@refresh_session_if_necessary
def execute_get(self, resource, **kwargs):
"""
Execute an HTTP GET request against the API endpoints.
This method is meant for internal use.
:param resource: The last part of the URI
:param kwargs: Additional query parameters (and optionally headers)
:return: The HTTP response as JSON or `GhostException` if unsuccessful
"""
url = '%s/%s' % (self.base_url, resource)
headers = kwargs.pop('headers', dict())
headers['Accept'] = 'application/json'
headers['Content-Type'] = 'application/json'
if kwargs:
separator = '&' if '?' in url else '?'
for key, value in kwargs.items():
if hasattr(value, '__iter__') and type(value) not in six.string_types:
url = '%s%s%s=%s' % (url, separator, key, ','.join(value))
else:
url = '%s%s%s=%s' % (url, separator, key, value)
separator = '&'
if self._access_token:
headers['Authorization'] = 'Bearer %s' % self._access_token
else:
separator = '&' if '?' in url else '?'
url = '%s%sclient_id=%s&client_secret=%s' % (
url, separator, self._client_id, self._client_secret
)
response = requests.get(url, headers=headers)
if response.status_code // 100 != 2:
raise GhostException(response.status_code, response.json().get('errors', []))
return response.json()
def execute_post(self, resource, **kwargs):
"""
Execute an HTTP POST request against the API endpoints.
This method is meant for internal use.
:param resource: The last part of the URI
:param kwargs: Additional parameters for the HTTP call (`request` library)
:return: The HTTP response as JSON or `GhostException` if unsuccessful
"""
return self._request(resource, requests.post, **kwargs).json()
def execute_put(self, resource, **kwargs):
"""
Execute an HTTP PUT request against the API endpoints.
This method is meant for internal use.
:param resource: The last part of the URI
:param kwargs: Additional parameters for the HTTP call (`request` library)
:return: The HTTP response as JSON or `GhostException` if unsuccessful
"""
return self._request(resource, requests.put, **kwargs).json()
@refresh_session_if_necessary
def _request(self, resource, request, **kwargs):
if not self._access_token:
raise GhostException(401, [{
'errorType': 'ClientError',
'message': 'Access token not found'
}])
url = '%s/%s' % (self.base_url, resource)
headers = kwargs.pop('headers', dict())
if 'json' in kwargs:
headers['Accept'] = 'application/json'
headers['Content-Type'] = 'application/json'
if self._access_token:
headers['Authorization'] = 'Bearer %s' % self._access_token
response = request(url, headers=headers, **kwargs)
if response.status_code // 100 != 2:
raise GhostException(response.status_code, response.json().get('errors', []))
return response
|
vmonaco/pohmm | pohmm/classification.py | PohmmClassifier.fit | python | def fit(self, labels, samples, pstates):
assert len(labels) == len(samples) == len(pstates)
for label in set(labels):
label_samples = [s for l,s in zip(labels, samples) if l == label]
label_pstates = [p for l,p in zip(labels, pstates) if l == label]
pohmm = self.pohmm_factory()
pohmm.fit(label_samples, label_pstates)
self.pohmms[label] = pohmm
return self | Fit the classifier with labels y and observations X | train | https://github.com/vmonaco/pohmm/blob/c00f8a62d3005a171d424549a55d46c421859ae9/pohmm/classification.py#L16-L30 | null | class PohmmClassifier(object):
"""
Train a POHMM for each label and make predictions by choosing the maximum likelihood model
"""
def __init__(self, pohmm_factory):
self.pohmms = {}
self.pohmm_factory = pohmm_factory
return
def fit_df(self, labels, dfs, pstate_col=PSTATE_COL):
"""
Fit the classifier with labels y and DataFrames dfs
"""
assert len(labels) == len(dfs)
for label in set(labels):
label_dfs = [s for l,s in zip(labels, dfs) if l == label]
pohmm = self.pohmm_factory()
pohmm.fit_df(label_dfs, pstate_col=pstate_col)
self.pohmms[label] = pohmm
return self
def predict(self, sample, pstates):
"""
Predict the class label of X
"""
scores = {}
for label, pohmm in self.pohmms.items():
scores[label] = pohmm.score(sample, pstates)
max_score_label = max(scores.items(), key=itemgetter(1))[0]
return max_score_label, scores
def predict_df(self, df, pstate_col=PSTATE_COL):
"""
Predict the class label of DataFrame df
"""
scores = {}
for label, pohmm in self.pohmms.items():
scores[label] = pohmm.score_df(df, pstate_col=pstate_col)
max_score_label = max(scores.items(), key=itemgetter(1))[0]
return max_score_label, scores
|
vmonaco/pohmm | pohmm/classification.py | PohmmClassifier.fit_df | python | def fit_df(self, labels, dfs, pstate_col=PSTATE_COL):
assert len(labels) == len(dfs)
for label in set(labels):
label_dfs = [s for l,s in zip(labels, dfs) if l == label]
pohmm = self.pohmm_factory()
pohmm.fit_df(label_dfs, pstate_col=pstate_col)
self.pohmms[label] = pohmm
return self | Fit the classifier with labels y and DataFrames dfs | train | https://github.com/vmonaco/pohmm/blob/c00f8a62d3005a171d424549a55d46c421859ae9/pohmm/classification.py#L32-L45 | [
"def keystroke_model():\n \"\"\"Generates a 2-state model with lognormal emissions and frequency smoothing\"\"\"\n model = Pohmm(n_hidden_states=2,\n init_spread=2,\n emissions=['lognormal', 'lognormal'],\n smoothing='freq',\n init_method='obs',\n thresh=1)\n return model\n"
] | class PohmmClassifier(object):
"""
Train a POHMM for each label and make predictions by choosing the maximum likelihood model
"""
def __init__(self, pohmm_factory):
self.pohmms = {}
self.pohmm_factory = pohmm_factory
return
def fit(self, labels, samples, pstates):
"""
Fit the classifier with labels y and observations X
"""
assert len(labels) == len(samples) == len(pstates)
for label in set(labels):
label_samples = [s for l,s in zip(labels, samples) if l == label]
label_pstates = [p for l,p in zip(labels, pstates) if l == label]
pohmm = self.pohmm_factory()
pohmm.fit(label_samples, label_pstates)
self.pohmms[label] = pohmm
return self
def predict(self, sample, pstates):
"""
Predict the class label of X
"""
scores = {}
for label, pohmm in self.pohmms.items():
scores[label] = pohmm.score(sample, pstates)
max_score_label = max(scores.items(), key=itemgetter(1))[0]
return max_score_label, scores
def predict_df(self, df, pstate_col=PSTATE_COL):
"""
Predict the class label of DataFrame df
"""
scores = {}
for label, pohmm in self.pohmms.items():
scores[label] = pohmm.score_df(df, pstate_col=pstate_col)
max_score_label = max(scores.items(), key=itemgetter(1))[0]
return max_score_label, scores
|
vmonaco/pohmm | pohmm/classification.py | PohmmClassifier.predict | python | def predict(self, sample, pstates):
scores = {}
for label, pohmm in self.pohmms.items():
scores[label] = pohmm.score(sample, pstates)
max_score_label = max(scores.items(), key=itemgetter(1))[0]
return max_score_label, scores | Predict the class label of X | train | https://github.com/vmonaco/pohmm/blob/c00f8a62d3005a171d424549a55d46c421859ae9/pohmm/classification.py#L47-L55 | null | class PohmmClassifier(object):
"""
Train a POHMM for each label and make predictions by choosing the maximum likelihood model
"""
def __init__(self, pohmm_factory):
self.pohmms = {}
self.pohmm_factory = pohmm_factory
return
def fit(self, labels, samples, pstates):
"""
Fit the classifier with labels y and observations X
"""
assert len(labels) == len(samples) == len(pstates)
for label in set(labels):
label_samples = [s for l,s in zip(labels, samples) if l == label]
label_pstates = [p for l,p in zip(labels, pstates) if l == label]
pohmm = self.pohmm_factory()
pohmm.fit(label_samples, label_pstates)
self.pohmms[label] = pohmm
return self
def fit_df(self, labels, dfs, pstate_col=PSTATE_COL):
"""
Fit the classifier with labels y and DataFrames dfs
"""
assert len(labels) == len(dfs)
for label in set(labels):
label_dfs = [s for l,s in zip(labels, dfs) if l == label]
pohmm = self.pohmm_factory()
pohmm.fit_df(label_dfs, pstate_col=pstate_col)
self.pohmms[label] = pohmm
return self
def predict_df(self, df, pstate_col=PSTATE_COL):
"""
Predict the class label of DataFrame df
"""
scores = {}
for label, pohmm in self.pohmms.items():
scores[label] = pohmm.score_df(df, pstate_col=pstate_col)
max_score_label = max(scores.items(), key=itemgetter(1))[0]
return max_score_label, scores
|
vmonaco/pohmm | pohmm/classification.py | PohmmClassifier.predict_df | python | def predict_df(self, df, pstate_col=PSTATE_COL):
scores = {}
for label, pohmm in self.pohmms.items():
scores[label] = pohmm.score_df(df, pstate_col=pstate_col)
max_score_label = max(scores.items(), key=itemgetter(1))[0]
return max_score_label, scores | Predict the class label of DataFrame df | train | https://github.com/vmonaco/pohmm/blob/c00f8a62d3005a171d424549a55d46c421859ae9/pohmm/classification.py#L57-L65 | null | class PohmmClassifier(object):
"""
Train a POHMM for each label and make predictions by choosing the maximum likelihood model
"""
def __init__(self, pohmm_factory):
    """Store the factory used to build one fresh POHMM per class label.

    pohmm_factory -- zero-argument callable returning an unfitted Pohmm.
    """
    self.pohmms = {}  # label -> fitted Pohmm, populated by fit()/fit_df()
    self.pohmm_factory = pohmm_factory
    return
def fit(self, labels, samples, pstates):
    """
    Fit the classifier with labels y and observations X.

    labels, samples, pstates are parallel lists; one POHMM is trained per
    unique label, using only the sequences carrying that label.
    Returns self for chaining.
    """
    assert len(labels) == len(samples) == len(pstates)
    for label in set(labels):
        # Select only the sequences that belong to this label.
        label_samples = [s for l,s in zip(labels, samples) if l == label]
        label_pstates = [p for l,p in zip(labels, pstates) if l == label]
        pohmm = self.pohmm_factory()
        pohmm.fit(label_samples, label_pstates)
        self.pohmms[label] = pohmm
    return self
def fit_df(self, labels, dfs, pstate_col=PSTATE_COL):
    """
    Fit the classifier with labels y and DataFrames dfs.

    labels and dfs are parallel lists; pstate_col names the DataFrame
    column holding the partial states. Returns self for chaining.
    """
    assert len(labels) == len(dfs)
    for label in set(labels):
        # Select only the DataFrames that belong to this label.
        label_dfs = [s for l,s in zip(labels, dfs) if l == label]
        pohmm = self.pohmm_factory()
        pohmm.fit_df(label_dfs, pstate_col=pstate_col)
        self.pohmms[label] = pohmm
    return self
def predict(self, sample, pstates):
    """Score the observation sequence under every fitted model.

    Returns (best_label, scores) where scores maps each label to its
    log-likelihood and best_label is the maximum-likelihood label.
    """
    scores = {label: model.score(sample, pstates)
              for label, model in self.pohmms.items()}
    best_label = max(scores.items(), key=itemgetter(1))[0]
    return best_label, scores
|
def rand(self, unique_pstates, random_state=None):
    """Randomize the POHMM parameters.

    Builds the p-state index for unique_pstates, draws random transition,
    start and emission parameters, then recomputes the marginal (index-0)
    state from them. Returns self for chaining.
    """
    self._init_pstates(unique_pstates)
    self._init_random(random_state=random_state)
    self._compute_marginals()
    return self
"def _init_pstates(self, unique_pstates):\n # Map events to a unique index. The unknown p-state is at idx 0\n self.e = defaultdict(int)\n self.e.update(dict(zip(np.sort(unique_pstates), range(1, len(unique_pstates) + 1))))\n self.er = {v: k for k, v in self.e.items()} # Reverse lookup\n self.er[0] = UNKNOWN_PSTATE\n self.n_partial_states = len(self.e.keys()) + 1 # Add one for the unknown state\n return\n",
"def _init_random(self, random_state=None):\n\n if random_state is None:\n random_state = self.random_state\n random_state = check_random_state(random_state)\n\n self.pstate_startprob = np.zeros(self.n_partial_states)\n self.pstate_transmat = np.zeros((self.n_partial_states, self.n_partial_states))\n self.pstate_steadyprob = np.zeros(self.n_partial_states)\n\n self.pstate_startprob[1:] = gen_stochastic_matrix(size=self.n_partial_states - 1, random_state=random_state)\n self.pstate_transmat[1:, 1:] = gen_stochastic_matrix(\n size=(self.n_partial_states - 1, self.n_partial_states - 1), random_state=random_state)\n self.pstate_steadyprob[1:] = steadystate(self.pstate_transmat[1:, 1:])\n\n self.startprob = gen_stochastic_matrix(size=(self.n_partial_states, self.n_hidden_states),\n random_state=random_state)\n\n transmat = np.zeros((self.n_partial_states, self.n_partial_states, self.n_hidden_states, self.n_hidden_states))\n for i, j in product(range(1, self.n_partial_states), range(1, self.n_partial_states)):\n transmat[i, j] = gen_stochastic_matrix(size=(self.n_hidden_states, self.n_hidden_states),\n random_state=random_state)\n self.transmat = normalize(transmat, axis=3)\n\n # Initialize emission parameters\n for feature_name, feature_distr in zip(self.emission_name, self.emission_distr):\n if feature_distr == 'normal':\n self.emission[feature_name]['mu'] = random_state.uniform(*_RANDINIT[feature_distr]['mu'], size=(\n self.n_partial_states, self.n_hidden_states))\n self.emission[feature_name]['sigma'] = random_state.uniform(*_RANDINIT[feature_distr]['sigma'], size=(\n self.n_partial_states, self.n_hidden_states))\n if feature_distr == 'lognormal':\n self.emission[feature_name]['logmu'] = random_state.uniform(*_RANDINIT[feature_distr]['logmu'], size=(\n self.n_partial_states, self.n_hidden_states))\n self.emission[feature_name]['logsigma'] = random_state.uniform(*_RANDINIT[feature_distr]['logsigma'],\n size=(self.n_partial_states,\n self.n_hidden_states))\n\n if 
self.emission_distr[0] == 'normal':\n self.emission[self.emission_name[0]]['mu'] = np.sort(self.emission[self.emission_name[0]]['mu'], axis=1)\n elif self.emission_distr[0] == 'lognormal':\n self.emission[self.emission_name[0]]['logmu'] = np.sort(self.emission[self.emission_name[0]]['logmu'],\n axis=1)\n\n return\n",
"def _compute_marginals(self):\n # TODO: cythonize some of this\n\n # Start prob, weighted by p-state start probs\n self.startprob[0] = (self.pstate_startprob[1:, np.newaxis] * self.startprob[1:]).sum(axis=0)\n\n # Use the p-state transmat and transmat to get the full transmat\n full_transmat = ph2full(self.pstate_transmat[1:, 1:], self.transmat[1:, 1:])\n full_steadyprob = steadystate(full_transmat)\n\n # Steady state probas are determined by the full trans mat, need to be updated\n steadyprob = np.zeros(shape=(self.n_partial_states, self.n_hidden_states))\n steadyprob[0] = full_steadyprob.reshape(-1, self.n_hidden_states).sum(axis=0)\n for i in range(self.n_partial_states - 1):\n steadyprob[i + 1] = normalize(\n full_steadyprob[i * self.n_hidden_states:i * self.n_hidden_states + self.n_hidden_states])\n\n self.steadyprob = steadyprob\n\n # Update the transations to/from the marginal state\n transmat = self.transmat\n # Group the hidden states within each partial state\n for hidx1, hidx2 in product(range(self.n_hidden_states), range(self.n_hidden_states)):\n transmat[0, 0][hidx1, hidx2] = full_transmat[hidx1::self.n_hidden_states, hidx2::self.n_hidden_states].sum()\n for pidx in range(self.n_partial_states - 1):\n transmat[pidx + 1, 0][hidx1, hidx2] = full_transmat[pidx * self.n_hidden_states + hidx1,\n hidx2::self.n_hidden_states].sum()\n transmat[0, pidx + 1][hidx1, hidx2] = full_transmat[hidx1::self.n_hidden_states,\n pidx * self.n_hidden_states + hidx2].sum()\n self.transmat = normalize(transmat, axis=3)\n\n pweights = self.pstate_steadyprob[1:, np.newaxis]\n # Update emission parameters\n for feature_name, feature_distr in zip(self.emission_name, self.emission_distr):\n if feature_distr == 'normal':\n # Marginal state is a mixture of normals\n mu = self.emission[feature_name]['mu'][1:, :]\n sigma = self.emission[feature_name]['sigma'][1:, :]\n\n # Weighted mean and var\n mu_0 = (pweights * mu).sum(axis=0)\n self.emission[feature_name]['mu'][0, :] = mu_0\n 
self.emission[feature_name]['sigma'][0, :] = np.sqrt(\n (pweights * ((mu - mu_0) ** 2 + sigma ** 2)).sum(axis=0))\n\n if feature_distr == 'lognormal':\n # Marginal state is a mixture of normals\n mu = self.emission[feature_name]['logmu'][1:, :]\n sigma = self.emission[feature_name]['logsigma'][1:, :]\n\n # Weighted mean and var\n mu_0 = (pweights * mu).sum(axis=0)\n self.emission[feature_name]['logmu'][0, :] = mu_0\n self.emission[feature_name]['logsigma'][0, :] = np.sqrt(\n (pweights * ((mu - mu_0) ** 2 + sigma ** 2)).sum(axis=0))\n\n return\n"
] | class Pohmm(object):
"""
Partially observable hidden Markov model
"""
def __init__(self,
             n_hidden_states=2,
             emissions=['normal'],
             max_iter=1000,
             thresh=1e-6,
             init_method='obs',
             init_spread=2,
             smoothing=None,
             random_state=None):
    """Configure a partially observable HMM (parameters are not allocated
    until fit(), since the partial-state alphabet is unknown until then).

    n_hidden_states -- number of hidden states per partial state.
    emissions -- list of feature distributions; either distribution-name
        strings (features auto-named 0..n-1) or (name, distribution) tuples.
        Each distribution must be a key of _DISTRIBUTIONS.
    max_iter, thresh -- EM iteration cap and log-likelihood convergence delta.
    init_method -- 'obs' (initialize from observations) or 'rand'.
    init_spread -- stddev multiples used to spread initial hidden-state means.
    smoothing -- None, a single strategy name applied everywhere, or a dict
        with 'transmat', 'startprob' and per-parameter entries.
    random_state -- seed/RandomState used where randomness is needed.
    """
    if type(n_hidden_states) is int:
        self.n_hidden_states = n_hidden_states
    else:
        raise Exception('Wrong type for n_hidden_states. Must be int')
    # Accept either [(name, distr), ...] or [distr, ...] with implicit names.
    if type(emissions[0]) is tuple:
        emission_name, emission_distr = zip(*emissions)
    elif type(emissions[0]) is str:
        emission_name, emission_distr = np.arange(len(emissions)), emissions
    for distr in emission_distr:
        if distr not in _DISTRIBUTIONS.keys():
            raise ValueError('Emission distribution must be one of', _DISTRIBUTIONS.keys())
    self.emission_name = emission_name
    self.emission_distr = emission_distr
    self.emission_name_distr = dict(zip(emission_name, emission_distr))
    self.n_features = len(emissions)
    # Set up the emission parameters
    # emission: {'feature':{'param': np.array(shape=(n_partial_states, n_hidden_states))}}
    # Values stay None until fit-time initialization fills them in.
    self.emission = defaultdict(dict)
    for name, distr in zip(self.emission_name, self.emission_distr):
        for param in _DISTRIBUTIONS[distr]:
            self.emission[name][param] = None
    self.emission = dict(self.emission)
    assert max_iter >= 0
    assert thresh >= 0
    self.max_iter = max_iter
    self.thresh = thresh
    assert init_spread >= 0
    self.init_spread = init_spread
    if init_method not in _INIT_METHODS:
        raise ValueError('init_method must be one of', _INIT_METHODS)
    self.init_method = init_method
    # Normalize the smoothing spec into a dict keyed by 'transmat',
    # 'startprob' and '<feature>:<param>' for every emission parameter.
    if smoothing is None:
        smoothing = {'transmat': None, 'startprob': None}
        for name, distr in zip(self.emission_name, self.emission_distr):
            for param in _DISTRIBUTIONS[distr]:
                smoothing['%s:%s' % (name, param)] = None
    elif type(smoothing) is str:
        # One strategy name applied uniformly to every parameter.
        s = smoothing
        smoothing = {'transmat': s, 'startprob': s}
        for name, distr in zip(self.emission_name, self.emission_distr):
            for param in _DISTRIBUTIONS[distr]:
                smoothing['%s:%s' % (name, param)] = s
    elif type(smoothing) is dict:
        assert 'transmat' in smoothing.keys()
        assert 'startprob' in smoothing.keys()
        for name, distr in zip(self.emission_name, self.emission_distr):
            for param in _DISTRIBUTIONS[distr]:
                assert param in smoothing.keys() or '%s:%s' % (name, param) in smoothing.keys()
                # A bare parameter key acts as a default for every feature.
                if param in smoothing.keys() and '%s:%s' % (name, param) not in smoothing.keys():
                    smoothing['%s:%s' % (name, param)] = smoothing[param]
    else:
        raise Exception('Wrong type for smoothing. Must be None, str, or dict')
    self.smoothing = smoothing
    self.random_state = random_state
    # Number of unique partial states is unknown until fit
    self.n_partial_states = None
    # Results after fitting the model
    self.logprob_ = None
    self.n_iter_performed_ = None
    self.logprob_delta_ = None
    # Mapping between p-states and a unique index
    # Defaults to 0 for unknown or missing p-states
    self.e = defaultdict(int)
def _get_startprob(self):
    """Return the start probabilities (exp of the stored log values)."""
    return np.exp(self._log_startprob)

def _set_startprob(self, startprob):
    """Validate and store start probabilities in log space.

    startprob -- array of shape (n_partial_states, n_hidden_states) whose
    rows sum to 1, or None for a uniform distribution.
    Raises ValueError on wrong length or rows not summing to 1.
    """
    if startprob is None:
        startprob = np.ones(shape=(self.n_partial_states, self.n_hidden_states)) / self.n_hidden_states
    else:
        # FIX: np.float was deprecated in NumPy 1.20 and later removed;
        # the builtin float is the documented replacement.
        startprob = np.asarray(startprob, dtype=float)

    # check if there exists a component whose value is exactly zero
    # if so, re-normalize (FIX: np.alltrue is deprecated; np.all is equivalent)
    # NOTE(review): normalize alone does not remove exact zeros, so log()
    # below can still produce -inf for zero entries — confirm callers rely
    # on MIN_PROBA flooring elsewhere.
    if not np.all(startprob):
        startprob = normalize(startprob, axis=1)

    if len(startprob) != self.n_partial_states:
        raise ValueError('startprob must have length n_partial_states')
    if not np.allclose(np.sum(startprob, axis=1), 1.0):
        raise ValueError('startprob must sum to 1.0')

    self._log_startprob = np.log(np.asarray(startprob).copy())

startprob = property(_get_startprob, _set_startprob)
def _get_steadyprob(self):
    """Return the steady-state probabilities (exp of the stored log values)."""
    return np.exp(self._log_steadyprob)

def _set_steadyprob(self, steadyprob):
    """Validate and store steady-state probabilities in log space.

    steadyprob -- array of shape (n_partial_states, n_hidden_states) whose
    rows sum to 1, or None for a uniform distribution.
    Raises ValueError on wrong length or rows not summing to 1.
    """
    if steadyprob is None:
        steadyprob = np.ones(shape=(self.n_partial_states, self.n_hidden_states)) / self.n_hidden_states
    else:
        # FIX: np.float was deprecated in NumPy 1.20 and later removed;
        # the builtin float is the documented replacement.
        steadyprob = np.asarray(steadyprob, dtype=float)

    # check if there exists a component whose value is exactly zero
    # if so, re-normalize (FIX: np.alltrue is deprecated; np.all is equivalent)
    if not np.all(steadyprob):
        steadyprob = normalize(steadyprob, axis=1)

    if len(steadyprob) != self.n_partial_states:
        raise ValueError('steadyprob must have length n_partial_states')
    if not np.allclose(np.sum(steadyprob, axis=1), 1.0):
        raise ValueError('steadyprob must sum to 1.0')

    self._log_steadyprob = np.log(np.asarray(steadyprob).copy())

steadyprob = property(_get_steadyprob, _set_steadyprob)
def _get_transmat(self):
    """Return the transition probabilities (exp of the stored log values)."""
    return np.exp(self._log_transmat)

def _set_transmat(self, transmat):
    """Validate and store transition probabilities in log space.

    transmat -- array of shape (n_partial_states, n_partial_states,
    n_hidden_states, n_hidden_states) whose innermost rows (axis 3) sum
    to 1, or None for a uniform distribution.
    Raises ValueError on wrong shape or rows not summing to 1.
    """
    if transmat is None:
        transmat = np.ones(shape=(self.n_partial_states, self.n_partial_states, self.n_hidden_states,
                                  self.n_hidden_states)) / self.n_hidden_states

    # check if there exists a component whose value is exactly zero
    # if so, re-normalize (FIX: np.alltrue is deprecated; np.all is equivalent)
    if not np.all(transmat):
        transmat = normalize(transmat, axis=3)

    if (np.asarray(transmat).shape
            != (self.n_partial_states, self.n_partial_states, self.n_hidden_states, self.n_hidden_states)):
        raise ValueError('transmat must have shape '
                         '(n_partial_states,n_partial_states,n_hidden_states,n_hidden_states)')
    if not np.all(np.allclose(np.sum(transmat, axis=3), 1.0)):
        raise ValueError('Rows of transmat must sum to 1.0')

    self._log_transmat = np.log(np.asarray(transmat).copy())
    # Zero entries produce nan under log; clamp them to NEGINF so downstream
    # log-space arithmetic stays well-defined.
    underflow_idx = np.isnan(self._log_transmat)
    self._log_transmat[underflow_idx] = NEGINF

transmat = property(_get_transmat, _set_transmat)
def _compute_log_likelihood(self, obs, pstates_idx):
    """Per-frame log-likelihood of each observation under every hidden state.

    obs -- observation array of shape (n_observations, n_features).
    pstates_idx -- partial-state index of each observation (0 = unknown).
    Returns an array of shape (n_observations, n_hidden_states); features are
    treated as conditionally independent, so per-feature log-densities are
    summed. Densities are floored at MIN_PROBA before taking the log so the
    result stays finite.
    """
    q = np.zeros(shape=(len(obs), self.n_hidden_states, self.n_features))
    for col, (feature_name, feature_distr) in enumerate(zip(self.emission_name, self.emission_distr)):
        if feature_distr == 'normal':
            # Emission parameters are selected per-frame by partial state.
            mu = self.emission[feature_name]['mu'][pstates_idx]
            sigma = self.emission[feature_name]['sigma'][pstates_idx]
            for j in range(self.n_hidden_states):
                q[:, j, col] = np.log(
                    np.maximum(MIN_PROBA, stats.norm.pdf(obs[:, col], loc=mu[:, j], scale=sigma[:, j])))
        if feature_distr == 'lognormal':
            logmu = self.emission[feature_name]['logmu'][pstates_idx]
            logsigma = self.emission[feature_name]['logsigma'][pstates_idx]
            for j in range(self.n_hidden_states):
                # scipy.stats.lognorm: shape = logsigma, scale = exp(logmu).
                q[:, j, col] = np.log(np.maximum(MIN_PROBA,
                                                 stats.lognorm.pdf(obs[:, col], logsigma[:, j], loc=0,
                                                                   scale=np.exp(logmu[:, j]))))
    # Sum the per-feature log densities (independence assumption).
    q = q.sum(axis=2)
    return q
def _generate_sample_from_state(self, hidden_state, pstates_idx, random_state=None):
    """Draw one observation vector from the emission distributions of the
    given (hidden_state, partial_state) pair.

    Returns an array of length n_features; each feature is sampled from its
    configured distribution ('normal' or 'lognormal').
    """
    sample = np.zeros(self.n_features)
    for col, (feature_name, feature_distr) in enumerate(zip(self.emission_name, self.emission_distr)):
        if feature_distr == 'normal':
            mu = self.emission[feature_name]['mu'][pstates_idx][hidden_state]
            sigma = self.emission[feature_name]['sigma'][pstates_idx][hidden_state]
            sample[col] = stats.norm.rvs(loc=mu, scale=sigma, random_state=random_state)
        if feature_distr == 'lognormal':
            logmu = self.emission[feature_name]['logmu'][pstates_idx][hidden_state]
            logsigma = self.emission[feature_name]['logsigma'][pstates_idx][hidden_state]
            # scipy.stats.lognorm: shape = logsigma, scale = exp(logmu).
            sample[col] = stats.lognorm.rvs(logsigma, loc=0, scale=np.exp(logmu), random_state=random_state)
    return sample
def _init_pstates(self, unique_pstates):
    """Build the bidirectional mapping between p-state labels and indexes.

    Index 0 is reserved for the unknown/marginal p-state; known labels are
    assigned 1..K in sorted order. Sets self.e (label -> index, defaulting
    to 0 for unseen labels), self.er (index -> label) and
    self.n_partial_states (K + 1).
    """
    self.e = defaultdict(int)
    for index, label in enumerate(np.sort(unique_pstates), start=1):
        self.e[label] = index
    # Reverse lookup; index 0 maps to the unknown-p-state sentinel.
    self.er = dict((index, label) for label, index in self.e.items())
    self.er[0] = UNKNOWN_PSTATE
    self.n_partial_states = len(self.e.keys()) + 1  # Add one for the unknown state
    return
def _init_pstate_freqs(self, pstates_idx):
    """Count p-state occurrences and transitions across all index sequences,
    then derive the p-state start, transition and steady-state probabilities.

    pstates_idx -- list of integer index sequences (output of self.e mapping).
    Requires pstate_startprob/pstate_transmat/pstate_steadyprob to be
    zero-initialized beforehand (done by _init_from_obs/_init_random).
    """
    # Partial state frequencies
    self.pstate_freq = Counter([idx for seq in pstates_idx for idx in seq])
    self.pstate_trans_freq = Counter([(idx1, idx2) for seq in pstates_idx for idx1, idx2 in zip(seq[:-1], seq[1:])])
    # Store freqs for the meta state
    self.pstate_freq[0] = len(np.concatenate(pstates_idx))
    for seq in pstates_idx:
        self.pstate_startprob[seq[0]] += 1
        self.pstate_steadyprob += np.bincount(seq, minlength=self.n_partial_states)
        for idx1, idx2 in zip(seq[:-1], seq[1:]):
            # Meta-state (index 0) transition counts aggregate over one or
            # both endpoints of each observed transition.
            self.pstate_trans_freq[(0, 0)] += 1
            self.pstate_trans_freq[(idx1, 0)] += 1
            self.pstate_trans_freq[(0, idx2)] += 1
            self.pstate_transmat[idx1, idx2] += 1
    # TODO: separate probas from freqs
    # Normalize to get the probabilities, ignore the meta state at idx 0
    self.pstate_startprob[1:] = normalize(self.pstate_startprob[1:])
    self.pstate_transmat[1:, 1:] = normalize(self.pstate_transmat[1:, 1:], axis=1)
    self.pstate_steadyprob[1:] = normalize(self.pstate_steadyprob[1:])
    return
def _init_from_obs(self, obs, pstates_idx):
    """Initialize model parameters from the observations ('obs' init method).

    obs -- list of observation arrays, each of shape (T_i, n_features).
    pstates_idx -- matching list of partial-state index sequences.
    Start/transition probabilities are set uniform; emission parameters are
    seeded from per-p-state sample statistics. Hidden states are ordered by
    the first feature, whose initial means are spread +/- init_spread
    standard deviations around the p-state mean.
    """
    # Partial state probabilities (filled in later by _init_pstate_freqs)
    self.pstate_startprob = np.zeros(self.n_partial_states)
    self.pstate_transmat = np.zeros((self.n_partial_states, self.n_partial_states))
    self.pstate_steadyprob = np.zeros(self.n_partial_states)
    # obs should be (N*T, n_features)
    # N is the number of samples
    # T is the size of each sample
    obs = np.concatenate(obs)
    pstates_idx = np.concatenate(pstates_idx)
    # Initialize starting and transition probas
    self.startprob = np.ones(shape=(self.n_partial_states, self.n_hidden_states)) / self.n_hidden_states
    self.steadyprob = np.ones(shape=(self.n_partial_states, self.n_hidden_states)) / self.n_hidden_states
    self.transmat = np.ones(shape=(self.n_partial_states, self.n_partial_states, self.n_hidden_states,
                                   self.n_hidden_states)) / self.n_hidden_states
    # Initialize emission parameters
    # Hidden states are ordered by the first feature
    feature1 = self.emission_name[0]
    for col, (feature_name, feature_distr) in enumerate(zip(self.emission_name, self.emission_distr)):
        if feature_distr == 'normal':
            self.emission[feature_name]['mu'] = np.zeros(shape=(self.n_partial_states, self.n_hidden_states))
            self.emission[feature_name]['sigma'] = np.zeros(shape=(self.n_partial_states, self.n_hidden_states))
            for idx in range(1, self.n_partial_states):
                idx_pstate = (pstates_idx == idx)
                # If a p-state never occurs, fall back to all observations.
                if not np.any(idx_pstate):
                    idx_pstate = np.arange(len(pstates_idx))
                if feature_name == feature1:
                    self.emission[feature_name]['sigma'][idx, :] = np.maximum(obs[idx_pstate, col].std(), MIN_PROBA)
                    # Spread the hidden-state means around the p-state mean.
                    self.emission[feature_name]['mu'][idx] = obs[idx_pstate, col].mean() + \
                                                             obs[:, col].std() * np.linspace(
                                                                 -self.init_spread, self.init_spread,
                                                                 self.n_hidden_states)
                else:
                    self.emission[feature_name]['sigma'][idx, :] = np.maximum(obs[idx_pstate, col].std(), MIN_PROBA)
                    self.emission[feature_name]['mu'][idx, :] = obs[idx_pstate, col].mean()
        if feature_distr == 'lognormal':
            self.emission[feature_name]['logmu'] = np.zeros(shape=(self.n_partial_states, self.n_hidden_states))
            self.emission[feature_name]['logsigma'] = np.zeros(shape=(self.n_partial_states, self.n_hidden_states))
            for idx in range(1, self.n_partial_states):
                idx_pstate = (pstates_idx == idx)
                # If a p-state never occurs, fall back to all observations.
                if not np.any(idx_pstate):
                    idx_pstate = np.arange(len(pstates_idx))
                if feature_name == feature1:
                    self.emission[feature_name]['logsigma'][idx, :] = np.maximum(np.log(obs[idx_pstate, col]).std(),
                                                                                 MIN_PROBA)
                    # Spread the hidden-state log-means around the p-state mean.
                    self.emission[feature_name]['logmu'][idx] = np.log(obs[idx_pstate, col]).mean() + np.log(
                        obs[:, col]).std() * np.linspace(-self.init_spread, self.init_spread,
                                                         self.n_hidden_states)
                else:
                    self.emission[feature_name]['logsigma'][idx, :] = np.maximum(np.log(obs[idx_pstate, col]).std(),
                                                                                 MIN_PROBA)
                    self.emission[feature_name]['logmu'][idx] = np.log(obs[idx_pstate, col]).mean()
    return
def _init_random(self, random_state=None):
    """Initialize all model parameters randomly ('rand' init method).

    random_state -- seed/RandomState; defaults to self.random_state.
    P-state and hidden-state stochastic matrices are drawn uniformly at
    random; emission parameters are drawn from the per-distribution ranges
    in _RANDINIT. Hidden states are ordered by sorting the first feature's
    (log-)means, matching the ordering convention of _init_from_obs.
    """
    if random_state is None:
        random_state = self.random_state
    random_state = check_random_state(random_state)

    self.pstate_startprob = np.zeros(self.n_partial_states)
    self.pstate_transmat = np.zeros((self.n_partial_states, self.n_partial_states))
    self.pstate_steadyprob = np.zeros(self.n_partial_states)

    # Index 0 (the unknown/marginal p-state) stays zero here; it is derived
    # later by _compute_marginals.
    self.pstate_startprob[1:] = gen_stochastic_matrix(size=self.n_partial_states - 1, random_state=random_state)
    self.pstate_transmat[1:, 1:] = gen_stochastic_matrix(
        size=(self.n_partial_states - 1, self.n_partial_states - 1), random_state=random_state)
    self.pstate_steadyprob[1:] = steadystate(self.pstate_transmat[1:, 1:])

    self.startprob = gen_stochastic_matrix(size=(self.n_partial_states, self.n_hidden_states),
                                           random_state=random_state)

    transmat = np.zeros((self.n_partial_states, self.n_partial_states, self.n_hidden_states, self.n_hidden_states))
    for i, j in product(range(1, self.n_partial_states), range(1, self.n_partial_states)):
        transmat[i, j] = gen_stochastic_matrix(size=(self.n_hidden_states, self.n_hidden_states),
                                               random_state=random_state)
    self.transmat = normalize(transmat, axis=3)

    # Initialize emission parameters
    for feature_name, feature_distr in zip(self.emission_name, self.emission_distr):
        if feature_distr == 'normal':
            self.emission[feature_name]['mu'] = random_state.uniform(*_RANDINIT[feature_distr]['mu'], size=(
                self.n_partial_states, self.n_hidden_states))
            self.emission[feature_name]['sigma'] = random_state.uniform(*_RANDINIT[feature_distr]['sigma'], size=(
                self.n_partial_states, self.n_hidden_states))
        if feature_distr == 'lognormal':
            self.emission[feature_name]['logmu'] = random_state.uniform(*_RANDINIT[feature_distr]['logmu'], size=(
                self.n_partial_states, self.n_hidden_states))
            self.emission[feature_name]['logsigma'] = random_state.uniform(*_RANDINIT[feature_distr]['logsigma'],
                                                                           size=(self.n_partial_states,
                                                                                 self.n_hidden_states))

    # Impose the hidden-state ordering convention on the first feature.
    if self.emission_distr[0] == 'normal':
        self.emission[self.emission_name[0]]['mu'] = np.sort(self.emission[self.emission_name[0]]['mu'], axis=1)
    elif self.emission_distr[0] == 'lognormal':
        self.emission[self.emission_name[0]]['logmu'] = np.sort(self.emission[self.emission_name[0]]['logmu'],
                                                                axis=1)

    return
def _smooth(self):
    """Shrink each p-state's parameters toward the marginal (index-0) state.

    First recomputes the marginals, then blends startprob, transmat and
    every emission parameter of each concrete p-state with the marginal
    values, using the per-parameter strategy in self.smoothing:
      'freq'  -- weight by inverse observation count,
      'proba' -- weight by p-state probability mass,
      'exp'   -- weight decays exponentially in observation count,
      'fixed' -- use the marginal value only,
      None    -- no smoothing.
    """
    self._compute_marginals()

    startprob = self.startprob
    for j in range(1, self.n_partial_states):
        # w_j weights the p-state's own estimate; w_ weights the marginal.
        if 'freq' == self.smoothing['startprob']:
            w_ = 1 / (1 + self.pstate_freq[j])
            w_j = 1 - w_
        elif 'proba' == self.smoothing['startprob']:
            w_j = self.pstate_steadyprob[j]
            w_ = 1 - w_j
        elif 'exp' == self.smoothing['startprob']:
            w_ = np.exp(-self.pstate_freq[j])
            w_j = 1 - w_
        elif 'fixed' == self.smoothing['startprob']:
            w_ = 1
            w_j = 0
        elif self.smoothing['startprob'] is None:
            w_ = 0
            w_j = 1
        else:
            raise Exception('Wrong smoothing for startprob: ' + self.smoothing['startprob'])
        startprob[j] = w_j * self.startprob[j] + w_ * self.startprob[0]
    self.startprob = startprob

    transmat = self.transmat
    for i, j in product(range(1, self.n_partial_states), range(1, self.n_partial_states)):
        # Four-way blend: the (i, j) estimate, the two half-marginals
        # (i, 0) and (0, j), and the full marginal (0, 0).
        if 'freq' == self.smoothing['transmat']:
            w_i0 = 1 / (1 + self.pstate_trans_freq[i, j] + self.pstate_trans_freq[0, j])
            w_0j = 1 / (1 + self.pstate_trans_freq[i, j] + self.pstate_trans_freq[i, 0])
            w_ij = 1 - (w_i0 + w_0j)
            w_ = 0
        elif 'proba' == self.smoothing['transmat']:
            denom = self.pstate_transmat[i, j] + self.pstate_transmat[i, :].sum() + self.pstate_transmat[:, j].sum()
            w_i0 = self.pstate_transmat[i, :].sum() / denom
            w_0j = self.pstate_transmat[:, j].sum() / denom
            w_ij = self.pstate_transmat[i, j] / denom
            w_ = 0
        elif 'exp' == self.smoothing['transmat']:
            w_i0 = self.pstate_trans_freq[i, 0] * np.exp(-self.pstate_trans_freq[i, j])
            w_0j = self.pstate_trans_freq[0, j] * np.exp(-self.pstate_trans_freq[i, j])
            w_ = self.pstate_trans_freq[0, 0] * np.exp(
                -(self.pstate_trans_freq[i, 0] + self.pstate_trans_freq[0, j]))
            w_ij = self.pstate_trans_freq[i, j]
            w_ij, w_i0, w_0j, w_ = normalize(np.array([w_ij, w_i0, w_0j, w_]))
        elif 'fixed' == self.smoothing['transmat']:
            w_i0 = 0
            w_0j = 0
            w_ = 1
            w_ij = 0
        elif self.smoothing['transmat'] is None:
            w_i0 = 0
            w_0j = 0
            w_ = 0
            w_ij = 1
        else:
            raise Exception('Wrong smoothing for transmat: ' + self.smoothing['transmat'])
        # NOTE(review): this check is one-sided; weights summing to less
        # than 1 would pass. abs(... - 1) < TOLERANCE is likely intended.
        assert (w_i0 + w_0j + w_ + w_ij) - 1 < TOLERANCE
        transmat[i, j] = w_ij * self.transmat[i, j] + w_i0 * self.transmat[i, 0] + w_0j * self.transmat[0, j] + w_ * \
                                                                                                               self.transmat[
                                                                                                                   0, 0]
    self.transmat = transmat

    # NOTE(review): these sanity checks are also one-sided (see above).
    assert np.all(self.startprob.sum(axis=1) - 1 < TOLERANCE)
    assert np.all(self.steadyprob.sum(axis=1) - 1 < TOLERANCE)
    assert np.all(self.transmat.sum(axis=3) - 1 < TOLERANCE)

    for feature_name, feature_distr in zip(self.emission_name, self.emission_distr):
        for param in _DISTRIBUTIONS[feature_distr]:
            key = '%s:%s' % (feature_name, param)
            for j in range(1, self.n_partial_states):
                if 'freq' == self.smoothing[key]:
                    w_ = 1 / (1 + self.pstate_freq[j])
                    w_j = 1 - w_
                elif 'proba' == self.smoothing[key]:
                    w_j = self.pstate_steadyprob[j]
                    w_ = 1 - w_j
                elif 'exp' == self.smoothing[key]:
                    w_ = np.exp(-self.pstate_freq[j])
                    w_j = 1 - w_
                elif 'fixed' == self.smoothing[key]:
                    w_ = 1
                    w_j = 0
                elif self.smoothing[key] is None:
                    w_ = 0
                    w_j = 1
                else:
                    raise Exception('Wrong smoothing for ' + key)
                self.emission[feature_name][param][j] = w_j * self.emission[feature_name][param][j] + w_ * \
                                                        self.emission[
                                                            feature_name][
                                                            param][0]
    return
def _initialize_sufficient_statistics(self):
    """Return zeroed accumulators for the E-step sufficient statistics.

    Keys: 'nobs' (sequence count), posterior mass and weighted observation
    moments ('post', 'obs', 'obs**2', 'lnobs', 'lnobs**2'), and the start,
    steady and transition accumulators ('start', 'steady', 'trans').
    """
    n_p = self.n_partial_states
    n_h = self.n_hidden_states
    n_f = self.n_features
    return {
        'nobs': 0,
        'post': np.zeros((n_p, n_h)),
        'obs': np.zeros((n_p, n_h, n_f)),
        'obs**2': np.zeros((n_p, n_h, n_f)),
        'lnobs': np.zeros((n_p, n_h, n_f)),
        'lnobs**2': np.zeros((n_p, n_h, n_f)),
        'start': np.zeros((n_p, n_h)),
        'steady': np.zeros((n_p, n_h)),
        'trans': np.zeros((n_p, n_p, n_h, n_h)),
    }
def _accumulate_sufficient_statistics(self, stats, obs, pstates_idx, framelogprob,
                                      posteriors, fwdlattice, bwdlattice):
    """Add one sequence's E-step contributions to the stats accumulators.

    stats -- dict from _initialize_sufficient_statistics (mutated in place).
    obs, pstates_idx -- the sequence's observations and p-state indexes.
    framelogprob, posteriors, fwdlattice, bwdlattice -- per-frame emission
    log-likelihoods, state posteriors and forward/backward lattices.
    """
    stats['nobs'] += 1

    n_observations, n_hidden_states = framelogprob.shape

    # Marginal (index-0) start stats use the first frame; each concrete
    # p-state uses the first frame in which it occurs.
    stats['start'][0] += posteriors[0]
    for i in range(self.n_partial_states):
        if len(np.where(pstates_idx == i)[0]) > 0:
            stats['start'][i] += posteriors[np.where(pstates_idx == i)[0].min()]

    if n_observations > 1:
        # Pairwise state posteriors (xi) in log space, via the C extension.
        lneta = np.zeros((n_observations - 1, n_hidden_states, n_hidden_states))
        _hmmc._compute_lneta(n_observations, n_hidden_states, pstates_idx, fwdlattice,
                             self._log_transmat, bwdlattice, framelogprob,
                             lneta)

        # Accumulate transition stats per observed (p-state i -> p-state j) pair.
        for i, j in unique_rows(np.c_[pstates_idx[:-1], pstates_idx[1:]]):
            if ((pstates_idx[:-1] == i) & (pstates_idx[1:] == j)).sum() > 0:
                stats['trans'][i, j] += np.exp(
                    logsumexp(lneta[(pstates_idx[:-1] == i) & (pstates_idx[1:] == j)], axis=0))

    # Posterior-weighted observation moments per p-state (both raw and log
    # moments, to serve normal and lognormal emissions).
    for i in range(self.n_partial_states):
        stats['post'][i] += posteriors[pstates_idx == i].sum(axis=0)
        stats['obs'][i] += np.dot(posteriors[pstates_idx == i].T, obs[pstates_idx == i])
        stats['obs**2'][i] += np.dot(posteriors[pstates_idx == i].T, obs[pstates_idx == i] ** 2)
        stats['lnobs'][i] += np.dot(posteriors[pstates_idx == i].T, np.log(obs[pstates_idx == i]))
        stats['lnobs**2'][i] += np.dot(posteriors[pstates_idx == i].T, np.log(obs[pstates_idx == i]) ** 2)
    return
def _compute_marginals(self):
    """Derive the marginal (index-0) state's parameters from the concrete
    p-states, and refresh the steady-state probabilities.

    The full chain over (p-state, hidden state) pairs is reconstructed from
    the p-state transmat and the per-pair hidden transmats; its steady state
    determines steadyprob, the marginal rows/columns of transmat, and the
    marginal emission parameters (a steady-prob-weighted mixture).
    """
    # TODO: cythonize some of this

    # Start prob, weighted by p-state start probs
    self.startprob[0] = (self.pstate_startprob[1:, np.newaxis] * self.startprob[1:]).sum(axis=0)

    # Use the p-state transmat and transmat to get the full transmat
    full_transmat = ph2full(self.pstate_transmat[1:, 1:], self.transmat[1:, 1:])
    full_steadyprob = steadystate(full_transmat)

    # Steady state probas are determined by the full trans mat, need to be updated
    steadyprob = np.zeros(shape=(self.n_partial_states, self.n_hidden_states))
    steadyprob[0] = full_steadyprob.reshape(-1, self.n_hidden_states).sum(axis=0)
    for i in range(self.n_partial_states - 1):
        steadyprob[i + 1] = normalize(
            full_steadyprob[i * self.n_hidden_states:i * self.n_hidden_states + self.n_hidden_states])

    self.steadyprob = steadyprob

    # Update the transations to/from the marginal state
    transmat = self.transmat
    # Group the hidden states within each partial state
    for hidx1, hidx2 in product(range(self.n_hidden_states), range(self.n_hidden_states)):
        transmat[0, 0][hidx1, hidx2] = full_transmat[hidx1::self.n_hidden_states, hidx2::self.n_hidden_states].sum()
        for pidx in range(self.n_partial_states - 1):
            transmat[pidx + 1, 0][hidx1, hidx2] = full_transmat[pidx * self.n_hidden_states + hidx1,
                                                                hidx2::self.n_hidden_states].sum()
            transmat[0, pidx + 1][hidx1, hidx2] = full_transmat[hidx1::self.n_hidden_states,
                                                                pidx * self.n_hidden_states + hidx2].sum()
    self.transmat = normalize(transmat, axis=3)

    pweights = self.pstate_steadyprob[1:, np.newaxis]
    # Update emission parameters: the marginal state's distribution is a
    # steady-prob-weighted mixture of the concrete p-states' distributions.
    for feature_name, feature_distr in zip(self.emission_name, self.emission_distr):
        if feature_distr == 'normal':
            # Marginal state is a mixture of normals
            mu = self.emission[feature_name]['mu'][1:, :]
            sigma = self.emission[feature_name]['sigma'][1:, :]

            # Weighted mean and var (law of total variance for sigma)
            mu_0 = (pweights * mu).sum(axis=0)
            self.emission[feature_name]['mu'][0, :] = mu_0
            self.emission[feature_name]['sigma'][0, :] = np.sqrt(
                (pweights * ((mu - mu_0) ** 2 + sigma ** 2)).sum(axis=0))

        if feature_distr == 'lognormal':
            # Marginal state is a mixture of normals
            mu = self.emission[feature_name]['logmu'][1:, :]
            sigma = self.emission[feature_name]['logsigma'][1:, :]

            # Weighted mean and var (law of total variance for sigma)
            mu_0 = (pweights * mu).sum(axis=0)
            self.emission[feature_name]['logmu'][0, :] = mu_0
            self.emission[feature_name]['logsigma'][0, :] = np.sqrt(
                (pweights * ((mu - mu_0) ** 2 + sigma ** 2)).sum(axis=0))

    return
def _do_mstep(self, stats):
    """M-step: re-estimate start, transition and emission parameters from
    the accumulated sufficient statistics.

    All denominators and the resulting sigmas are floored at MIN_PROBA to
    avoid division by zero and degenerate (zero-variance) emissions.
    """
    self.startprob = normalize(np.maximum(stats['start'], MIN_PROBA), axis=1)
    self.transmat = normalize(np.maximum(stats['trans'], MIN_PROBA), axis=3)

    for col, (feature_name, feature_distr) in enumerate(zip(self.emission_name, self.emission_distr)):
        if feature_distr == 'normal':
            denom = np.maximum(stats['post'], MIN_PROBA)
            # Posterior-weighted mean and variance (standard Gaussian M-step).
            mu = stats['obs'][:, :, col] / denom
            cv_num = (stats['obs**2'][:, :, col]
                      - 2 * mu * stats['obs'][:, :, col]
                      + mu ** 2 * denom)
            sigma = np.sqrt(cv_num / denom)
            sigma[np.isnan(sigma)] = MIN_PROBA
            self.emission[feature_name]['mu'] = mu
            self.emission[feature_name]['sigma'] = np.maximum(sigma, MIN_PROBA)
        if feature_distr == 'lognormal':
            denom = np.maximum(stats['post'], MIN_PROBA)
            # Same update in log-observation space for lognormal emissions.
            mu = stats['lnobs'][:, :, col] / denom
            cv_num = (stats['lnobs**2'][:, :, col]
                      - 2 * mu * stats['lnobs'][:, :, col]
                      + mu ** 2 * denom)
            sigma = np.sqrt(cv_num / denom)
            sigma[np.isnan(sigma)] = MIN_PROBA
            self.emission[feature_name]['logmu'] = mu
            self.emission[feature_name]['logsigma'] = np.maximum(sigma, MIN_PROBA)

    return
def _do_forward_pass(self, framelogprob, event_idx):
    """Run the forward algorithm (delegated to the _hmmc C extension).

    framelogprob -- (n_observations, n_hidden_states) emission log-likelihoods.
    event_idx -- partial-state index of each observation.
    Returns (sequence log-likelihood, forward lattice in log space).
    """
    n_observations, n_hidden_states = framelogprob.shape
    fwdlattice = np.zeros((n_observations, n_hidden_states))
    _hmmc._forward(n_observations, n_hidden_states,
                   event_idx, self._log_startprob,
                   self._log_transmat, framelogprob, fwdlattice)
    # Total log-likelihood is the logsumexp over the final frame's states.
    return logsumexp(fwdlattice[-1]), fwdlattice
def _do_backward_pass(self, framelogprob, event_idx):
    """Run the backward algorithm (delegated to the _hmmc C extension).

    framelogprob -- (n_observations, n_hidden_states) emission log-likelihoods.
    event_idx -- partial-state index of each observation.
    Returns the backward lattice in log space.
    """
    n_observations, n_hidden_states = framelogprob.shape
    bwdlattice = np.zeros((n_observations, n_hidden_states))
    _hmmc._backward(n_observations, n_hidden_states,
                    event_idx, self._log_startprob,
                    self._log_transmat, framelogprob, bwdlattice)
    return bwdlattice
def _do_viterbi_pass(self, framelogprob, event_idx):
    """Run the Viterbi algorithm (delegated to the _hmmc C extension).

    framelogprob -- (n_observations, n_hidden_states) emission log-likelihoods.
    event_idx -- partial-state index of each observation.
    Returns (log-likelihood of the best path, most likely hidden-state path).
    """
    n_observations, n_components = framelogprob.shape
    state_sequence, logprob = _hmmc._viterbi(
        n_observations, n_components,
        event_idx, self._log_startprob,
        self._log_transmat, framelogprob)
    return logprob, state_sequence
def fit(self, obs, pstates, unique_pstates=None):
    """Estimate model parameters with Baum-Welch EM.

    obs -- list of observation arrays, each of shape (T_i, n_features).
    pstates -- matching list of partial-state label sequences.
    unique_pstates -- optional explicit p-state alphabet; defaults to the
        labels observed in pstates.
    Sets logprob_, n_iter_performed_ and logprob_delta_; returns self.
    """
    obs = [np.array(o) for o in obs]
    pstates = [np.array(p) for p in pstates]

    # List or array of observation sequences
    assert len(obs) == len(pstates)
    assert obs[0].ndim == 2
    assert pstates[0].ndim == 1

    if unique_pstates is not None:
        self._init_pstates(unique_pstates)
    else:
        self._init_pstates(list(set(np.concatenate(pstates))))

    # Map the partial states to a unique index
    pstates_idx = [np.array([self.e[p] for p in seq]) for seq in pstates]

    if self.init_method == 'rand':
        self._init_random()
    elif self.init_method == 'obs':
        self._init_from_obs(obs, pstates_idx)

    self._init_pstate_freqs(pstates_idx)
    self._smooth()

    logprob = []
    for i in range(self.max_iter):
        # Expectation step
        stats = self._initialize_sufficient_statistics()
        curr_logprob = 0
        for obs_i, pstates_idx_i in zip(obs, pstates_idx):
            framelogprob = self._compute_log_likelihood(obs_i, pstates_idx_i)
            lpr, fwdlattice = self._do_forward_pass(framelogprob, pstates_idx_i)
            bwdlattice = self._do_backward_pass(framelogprob, pstates_idx_i)
            # State posteriors (gamma), normalized per frame in log space.
            gamma = fwdlattice + bwdlattice
            posteriors = np.exp(gamma.T - logsumexp(gamma, axis=1)).T
            curr_logprob += lpr
            self._accumulate_sufficient_statistics(stats, obs_i,
                                                   pstates_idx_i, framelogprob, posteriors, fwdlattice, bwdlattice)
        logprob.append(curr_logprob)
        self.logprob_ = curr_logprob

        # Check for convergence.
        self.n_iter_performed_ = i
        if i > 0:
            # FIX: __init__ declares the fit-result attribute as
            # `logprob_delta_` (trailing underscore) but this previously set
            # `logprob_delta`, leaving the declared attribute always None.
            # Both names are set for backward compatibility.
            self.logprob_delta_ = self.logprob_delta = logprob[-1] - logprob[-2]
            if self.logprob_delta_ < self.thresh:
                break

        # Maximization step
        self._do_mstep(stats)

        # Mix the parameters
        self._smooth()

    return self
def score(self, obs, pstates):
    """
    Compute the log probability of obs under the model.

    obs -- observation array of shape (n_observations, n_features).
    pstates -- matching partial-state labels; unknown labels map to the
        marginal state (index 0) via the defaultdict self.e.
    """
    pstates_idx = np.array([self.e[p] for p in pstates])
    framelogprob = self._compute_log_likelihood(obs, pstates_idx)
    logprob, _ = self._do_forward_pass(framelogprob, pstates_idx)
    return logprob
def score_events(self, obs, pstates):
    """
    Compute the log probability of each event under the model.

    Returns an array of per-event log-likelihood increments: the cumulative
    forward log-likelihood after each frame, differenced so that the values
    sum to the total sequence log-likelihood.
    """
    pstates_idx = np.array([self.e[p] for p in pstates])
    framelogprob = self._compute_log_likelihood(obs, pstates_idx)
    _, fwdlattice = self._do_forward_pass(framelogprob, pstates_idx)
    # Cumulative log-likelihood up to each frame.
    L = logsumexp(fwdlattice, axis=1)
    # First increment is L[0] itself; the rest are successive differences.
    return np.concatenate([L[[0]], np.diff(L)])
def predict_states(self, obs, pstates):
    """Return the most likely hidden-state path for the sequence.

    Returns (Viterbi path log-likelihood, hidden-state index sequence).
    """
    pstates_idx = np.array([self.e[p] for p in pstates])
    framelogprob = self._compute_log_likelihood(obs, pstates_idx)
    viterbi_logprob, state_sequence = self._do_viterbi_pass(framelogprob, pstates_idx)
    return viterbi_logprob, state_sequence
def predict(self, obs, pstates, next_pstate=None):
"""
Predict the next observation
"""
assert len(obs) == len(pstates)
pstates_idx = np.array([self.e[ei] for ei in pstates])
next_pstate_idx = self.e[next_pstate]
if len(obs) == 0:
# No history, use the starting probas
next_hstate_prob = self.startprob[next_pstate_idx]
else:
# With a history, determine the hidden state posteriors using
# the last posteriors and transition matrix
framelogprob = self._compute_log_likelihood(obs, pstates_idx)
_, fwdlattice = self._do_forward_pass(framelogprob, pstates_idx)
next_hstate_prob = np.zeros(self.n_hidden_states)
alpha_n = fwdlattice[-1]
vmax = alpha_n.max(axis=0)
alpha_n = np.exp(alpha_n - vmax)
alpha_n = alpha_n / alpha_n.sum()
trans = self.transmat[pstates_idx[-1], next_pstate_idx]
for i in range(self.n_hidden_states):
next_hstate_prob[i] = np.sum([alpha_n[j] * trans[j, i] for j in range(self.n_hidden_states)])
assert next_hstate_prob.sum() - 1 < TOLERANCE
# Make the prediction
prediction = np.array(
[self.expected_value(feature, pstate=next_pstate, hstate_prob=next_hstate_prob) for feature in
self.emission_name])
# next_hstate = np.argmax(next_hstate_prob)
# prediction = np.array(
# [self.expected_value(feature, pstate=next_pstate, hstate=next_hstate) for feature in
# self.emission_name])
return prediction
def gen_pstates_idx(self, n, random_state=None):
if random_state is None:
random_state = self.random_state
random_state = check_random_state(random_state)
startprob_cdf = np.cumsum(self.pstate_startprob)
transmat_cdf = np.cumsum(self.pstate_transmat, 1)
# Initial state.
rand = random_state.rand()
curr_pstate = (startprob_cdf > rand).argmax()
pstates = [curr_pstate]
for _ in range(1, n):
rand = random_state.rand()
curr_pstate = (transmat_cdf[curr_pstate] > rand).argmax()
pstates.append(curr_pstate)
return np.array(pstates, dtype=int)
    def sample(self, pstates=None, n_obs=None, random_state=None):
        """
        Generate a sequence of samples from the model.

        Exactly one of ``pstates`` (a sequence of partial-state labels to
        condition on) or ``n_obs`` (the number of events; partial states
        are then sampled from the model) must be provided.

        Returns a tuple ``(obs, pstates, hidden_states)`` of arrays.
        """
        random_state = check_random_state(random_state)
        if pstates is None and n_obs is None:
            raise Exception('Must provide either pstates or n_obs')
        if pstates is not None and n_obs is not None:
            raise Exception('Must provide either pstates or n_obs but not both')
        gen_pstates = False
        # NOTE(review): this draw is only consumed in the pstates-is-None
        # branch below; when pstates is given it is discarded (affects the
        # random stream but not correctness)
        rand = random_state.rand()
        if pstates is None:
            gen_pstates = True
            pstartprob_cdf = np.cumsum(self.pstate_startprob)
            ptransmat_cdf = np.cumsum(self.pstate_transmat, 1)
            # Initial pstate: invert the starting CDF with the uniform draw
            currpstate = (pstartprob_cdf > rand).argmax()
            pstates_idx = [currpstate]
            pstates = [self.er[currpstate]]
        else:
            n_obs = len(pstates)
            pstates_idx = np.array([self.e[p] for p in pstates])
        startprob_pdf = self.startprob[pstates_idx[0]]
        startprob_cdf = np.cumsum(startprob_pdf)
        # NOTE(review): this transmat_cdf is overwritten inside the loop
        # before first use and appears to be dead here
        transmat_cdf = np.cumsum(self.transmat[0, pstates_idx[0]], 1)
        # Initial hidden state, drawn from the start distribution of the
        # first partial state
        rand = random_state.rand()
        currstate = (startprob_cdf > rand).argmax()
        hidden_states = [currstate]
        obs = [self._generate_sample_from_state(currstate, pstates_idx[0], random_state)]
        for i in range(1, n_obs):
            rand = random_state.rand()
            if gen_pstates:
                # Sample the next partial state before the hidden state
                currpstate = (ptransmat_cdf[currpstate] > rand).argmax()
                pstates_idx.append(currpstate)
                pstates.append(self.er[currpstate])
            # Hidden-state transitions for the (prev pstate, cur pstate) pair
            transmat_cdf = np.cumsum(self.transmat[pstates_idx[i - 1], pstates_idx[i]], 1)
            rand = random_state.rand()
            currstate = (transmat_cdf[currstate] > rand).argmax()
            hidden_states.append(currstate)
            obs.append(self._generate_sample_from_state(currstate, pstates_idx[i], random_state))
        return np.array(obs), np.array(pstates), np.array(hidden_states, dtype=int)
def fit_df(self, dfs, pstate_col=PSTATE_COL):
"""
Convenience function to fit a model from a list of dataframes
"""
obs_cols = list(self.emission_name)
obs = [df[df.columns.difference([pstate_col])][obs_cols].values for df in dfs]
pstates = [df[pstate_col].values for df in dfs]
return self.fit(obs, pstates)
def score_df(self, df, pstate_col=PSTATE_COL):
"""
"""
obs_cols = list(self.emission_name)
obs = df[df.columns.difference([pstate_col])][obs_cols].values
pstates = df[pstate_col].values
return self.score(obs, pstates)
def score_events_df(self, df, pstate_col=PSTATE_COL, score_col='score'):
"""
"""
df = df.copy()
obs_cols = list(self.emission_name)
obs = df[df.columns.difference([pstate_col])][obs_cols].values
pstates = df[pstate_col].values
df[score_col] = self.score_events(obs, pstates)
return df
def predict_states_df(self, df, pstate_col=PSTATE_COL, hstate_col=HSTATE_COL):
df = df.copy()
obs_cols = list(self.emission_name)
obs = df[df.columns.difference([pstate_col])][obs_cols].values
pstates = df[pstate_col].values
_, df[hstate_col] = self.predict_states(obs, pstates)
return df
def predict_df(self, df, next_pstate=None, pstate_col=PSTATE_COL):
obs_cols = list(self.emission_name)
obs = df[df.columns.difference([pstate_col])][obs_cols].values
pstates = df[pstate_col].values
return self.predict(obs, pstates, next_pstate)
def sample_df(self, pstates=None, n_obs=None, random_state=None, pstate_col=PSTATE_COL, hstate_col=HSTATE_COL):
"""
Convenience function to generate samples a model and create a dataframe
"""
try:
import pandas as pd
except Exception as e:
raise e
obs, pstates, hstates = self.sample(pstates, n_obs, random_state)
items = []
if pstate_col is not None:
items.append((pstate_col, pstates))
if hstate_col is not None:
items.append((hstate_col, hstates))
items = items + [(self.emission_name[i], obs[:, i]) for i in range(self.n_features)]
df = pd.DataFrame.from_items(items)
return df
    def __str__(self):
        """Return a human-readable multi-line summary of the model.

        Lists hidden/partial state counts, transition and starting/steady
        probabilities, and the emission parameters for each feature.
        """
        # Partial-state labels in a deterministic (sorted) order
        pstates = sorted(self.e.keys())
        sep = '-' * 80 + '\n'
        sep2 = '_' * 40 + '\n'
        out = 'POHMM\n'
        out += 'H-states: %d\n' % self.n_hidden_states
        out += 'P-states: (%d) %s\n' % (len(pstates), str(pstates))
        out += 'Emission: %s\n' % (self.emission_distr)
        out += sep
        out += 'Transition probas\n'
        out += sep2
        # Index 0 is the marginal partial state, printed as '.'
        out += '. -> .\n%s\n' % str(self.transmat[0, 0])
        for pstate in pstates:
            out += sep2
            out += '%s -> .\n%s\n' % (pstate, str(self.transmat[self.e[pstate], 0]))
            out += sep2
            out += '. -> %s\n%s\n' % (pstate, str(self.transmat[0, self.e[pstate]]))
        for pstate1, pstate2 in product(pstates, pstates):
            out += sep2
            out += '%s -> %s\n%s\n' % (pstate1, pstate2, str(self.transmat[self.e[pstate1], self.e[pstate2]]))
        out += sep
        out += 'Starting probas\n'
        out += '.: %s\n' % str(self.startprob[0])
        for pstate in pstates:
            out += '%s: %s\n' % (pstate, str(self.startprob[self.e[pstate]]))
        out += sep
        out += 'Steady probas\n'
        out += '.: %s\n' % str(self.steadyprob[0])
        for pstate in pstates:
            out += '%s: %s\n' % (pstate, str(self.steadyprob[self.e[pstate]]))
        out += sep
        out += 'Emissions\n'
        for feature_name, feature_distr in zip(self.emission_name, self.emission_distr):
            out += sep2
            out += 'Feature %s: %s\n' % (feature_name, feature_distr)
            # One line per distribution parameter: marginal first, then each pstate
            for param in _DISTRIBUTIONS[feature_distr]:
                out += '.: %s = %s\n' % (param, str(self.emission[feature_name][param][0]))
                for pstate in pstates:
                    out += '%s: %s = %s\n' % (pstate, param, str(self.emission[feature_name][param][self.e[pstate]]))
        out += sep
        return out
    def expected_value(self, feature=None, pstate=None, hstate=None, pstate_prob=None, hstate_prob=None):
        """
        Expected value of *feature*, optionally conditioned on a partial
        and/or hidden state.

        Provide either ``pstate`` or ``pstate_prob`` (not both) and either
        ``hstate`` or ``hstate_prob`` (not both). States that are not
        specified are marginalized using the steady-state probabilities.
        Returns None for an unrecognized emission distribution.
        """
        # Use the first feature by default
        if feature is None:
            feature = self.emission_name[0]
        # Will default to marginal pstate if pstate is unknown or None
        # (self.e is a defaultdict, so unseen labels map to index 0)
        pstate_idx = self.e[pstate]
        if pstate is not None and pstate_prob is not None:
            raise Exception('Must provide either pstate or pstate_proba but not both')
        if hstate is not None and hstate_prob is not None:
            raise Exception('Must provide either hstate or hstate_proba but not both')
        # Marginalize pstate using the steady state probas
        if pstate_prob is None:
            pstate_prob = self.pstate_steadyprob
        # Marginalize hstate using the steady state probas
        if hstate_prob is None:
            hstate_prob = self.steadyprob[pstate_idx]
        if pstate is None and hstate is None:
            # Marginalize both pstate and hstate: joint weights over every
            # (pstate, hstate) pair, flattened to align with the flattened
            # parameter arrays
            w = (pstate_prob[:, np.newaxis] * hstate_prob).flatten()
            if self.emission_name_distr[feature] == 'lognormal':
                return np.sum(w * expected_lognormal(self.emission[feature]['logsigma'].flatten(),
                                                     self.emission[feature]['logmu'].flatten()))
            elif self.emission_name_distr[feature] == 'normal':
                return np.sum(w * expected_normal(self.emission[feature]['mu'].flatten(),
                                                  self.emission[feature]['sigma'].flatten()))
        elif hstate is None:
            # Marginalize hstate only: weight each hidden state's
            # expectation within the given pstate row
            if self.emission_name_distr[feature] == 'lognormal':
                return np.sum(hstate_prob * expected_lognormal(self.emission[feature]['logsigma'][pstate_idx, :],
                                                               self.emission[feature]['logmu'][pstate_idx, :]))
            elif self.emission_name_distr[feature] == 'normal':
                return np.sum(hstate_prob * expected_normal(self.emission[feature]['mu'][pstate_idx, :],
                                                            self.emission[feature]['sigma'][pstate_idx, :]))
        elif pstate is None:
            # Marginalize pstate only: weight each pstate's expectation
            # within the given hidden state column
            if self.emission_name_distr[feature] == 'lognormal':
                return np.sum(pstate_prob * expected_lognormal(self.emission[feature]['logsigma'][:, hstate],
                                                               self.emission[feature]['logmu'][:, hstate]))
            elif self.emission_name_distr[feature] == 'normal':
                return np.sum(pstate_prob * expected_normal(self.emission[feature]['mu'][:, hstate],
                                                            self.emission[feature]['sigma'][:, hstate]))
        else:
            # Both states given: no marginalization needed
            if self.emission_name_distr[feature] == 'lognormal':
                return expected_lognormal(self.emission[feature]['logsigma'][pstate_idx, hstate],
                                          self.emission[feature]['logmu'][pstate_idx, hstate])
            elif self.emission_name_distr[feature] == 'normal':
                return expected_normal(self.emission[feature]['mu'][pstate_idx, hstate],
                                       self.emission[feature]['sigma'][pstate_idx, hstate])
        # Fall-through for an unknown distribution: implicit None
        return
    def pdf_fn(self, feature=None, pstate=None, hstate=None, pstate_prob=None, hstate_prob=None):
        """
        Build a probability density function for *feature* as a weighted
        mixture of the component densities, optionally conditioned on a
        partial and/or hidden state.

        Provide either ``pstate`` or ``pstate_prob`` (not both) and either
        ``hstate`` or ``hstate_prob`` (not both); unspecified states are
        marginalized with the steady-state probabilities.

        Returns
        -------
        fn : callable
            Accepts a scalar or array-like and returns the mixture density.
        """
        # Use the first feature by default
        if feature is None:
            feature = self.emission_name[0]
        # Will default to marginal pstate if pstate is unknown or None
        pstate_idx = self.e[pstate]
        if pstate is not None and pstate_prob is not None:
            raise Exception('Must provide either pstate or pstate_proba but not both')
        if hstate is not None and hstate_prob is not None:
            raise Exception('Must provide either hstate or hstate_proba but not both')
        # Marginalize pstate using the steady state probas
        if pstate_prob is None:
            pstate_prob = self.pstate_steadyprob
        # Marginalize hstate using the steady state probas
        if hstate_prob is None:
            hstate_prob = self.steadyprob[pstate_idx]
        if pstate is None and hstate is None:
            # Marginalize both pstate and hstate: mixture over every
            # (pstate, hstate) component, weights flattened to match the
            # flattened parameter arrays
            w = (pstate_prob[:, np.newaxis] * hstate_prob).flatten()
            if self.emission_name_distr[feature] == 'lognormal':
                pdf = lambda x: stats.lognorm.pdf(x, self.emission[feature]['logsigma'].flatten(), 0,
                                                  np.exp(self.emission[feature]['logmu'].flatten()))
            elif self.emission_name_distr[feature] == 'normal':
                pdf = lambda x: stats.norm.pdf(x, self.emission[feature]['mu'].flatten(),
                                               self.emission[feature]['sigma'].flatten())
        elif hstate is None:
            # Marginalize hstate: mixture over hidden states for the given pstate
            w = hstate_prob
            if self.emission_name_distr[feature] == 'lognormal':
                pdf = lambda x: stats.lognorm.pdf(x, self.emission[feature]['logsigma'][pstate_idx, :], 0,
                                                  np.exp(self.emission[feature]['logmu'][pstate_idx, :]))
            elif self.emission_name_distr[feature] == 'normal':
                pdf = lambda x: stats.norm.pdf(x, self.emission[feature]['mu'][pstate_idx, :],
                                               self.emission[feature]['sigma'][pstate_idx, :])
        elif pstate is None:
            # Marginalize pstate: mixture over partial states for the given hstate
            w = pstate_prob
            if self.emission_name_distr[feature] == 'lognormal':
                pdf = lambda x: stats.lognorm.pdf(x, self.emission[feature]['logsigma'][:, hstate], 0,
                                                  np.exp(self.emission[feature]['logmu'][:, hstate]))
            elif self.emission_name_distr[feature] == 'normal':
                pdf = lambda x: stats.norm.pdf(x, self.emission[feature]['mu'][:, hstate],
                                               self.emission[feature]['sigma'][:, hstate])
        else:
            # Both states given: a single component with unit weight
            w = 1
            if self.emission_name_distr[feature] == 'lognormal':
                pdf = lambda x: stats.lognorm.pdf(x, self.emission[feature]['logsigma'][pstate_idx, hstate], 0,
                                                  np.exp(self.emission[feature]['logmu'][pstate_idx, hstate]))
            elif self.emission_name_distr[feature] == 'normal':
                pdf = lambda x: stats.norm.pdf(x, self.emission[feature]['mu'][pstate_idx, hstate],
                                               self.emission[feature]['sigma'][pstate_idx, hstate])
        def fn(x):
            # Weighted sum of component densities; handles scalars and arrays
            if np.isscalar(x):
                p = np.sum(w * pdf(x))
            else:
                x = np.array(x)
                p = np.zeros(len(x))
                for i, xi in enumerate(x):
                    p[i] = np.sum(w * pdf(xi))
            return p
        return fn
    def cdf_fn(self, feature=None, pstate=None, hstate=None, pstate_prob=None, hstate_prob=None):
        """
        Build a cumulative distribution function for *feature* as a
        weighted mixture of the component CDFs, optionally conditioned on
        a partial and/or hidden state.

        Provide either ``pstate`` or ``pstate_prob`` (not both) and either
        ``hstate`` or ``hstate_prob`` (not both); unspecified states are
        marginalized with the steady-state probabilities.

        Returns
        -------
        fn : callable
            Accepts a scalar or array-like and returns the mixture CDF.
        """
        # Use the first feature by default
        if feature is None:
            feature = self.emission_name[0]
        # Will default to marginal pstate if pstate is unknown or None
        pstate_idx = self.e[pstate]
        if pstate is not None and pstate_prob is not None:
            raise Exception('Must provide either pstate or pstate_proba but not both')
        if hstate is not None and hstate_prob is not None:
            raise Exception('Must provide either hstate or hstate_proba but not both')
        # Marginalize pstate using the steady state probas
        if pstate_prob is None:
            pstate_prob = self.pstate_steadyprob
        # Marginalize hstate using the steady state probas
        if hstate_prob is None:
            hstate_prob = self.steadyprob[pstate_idx]
        if pstate is None and hstate is None:
            # Marginalize both pstate and hstate: mixture over every
            # (pstate, hstate) component, weights flattened to match the
            # flattened parameter arrays
            w = (pstate_prob[:, np.newaxis] * hstate_prob).flatten()
            if self.emission_name_distr[feature] == 'lognormal':
                cdf = lambda x: stats.lognorm.cdf(x, self.emission[feature]['logsigma'].flatten(), 0,
                                                  np.exp(self.emission[feature]['logmu'].flatten()))
            elif self.emission_name_distr[feature] == 'normal':
                cdf = lambda x: stats.norm.cdf(x, self.emission[feature]['mu'].flatten(),
                                               self.emission[feature]['sigma'].flatten())
        elif hstate is None:
            # Marginalize hstate: mixture over hidden states for the given pstate
            w = hstate_prob
            if self.emission_name_distr[feature] == 'lognormal':
                cdf = lambda x: stats.lognorm.cdf(x, self.emission[feature]['logsigma'][pstate_idx, :], 0,
                                                  np.exp(self.emission[feature]['logmu'][pstate_idx, :]))
            elif self.emission_name_distr[feature] == 'normal':
                cdf = lambda x: stats.norm.cdf(x, self.emission[feature]['mu'][pstate_idx, :],
                                               self.emission[feature]['sigma'][pstate_idx, :])
        elif pstate is None:
            # Marginalize pstate: mixture over partial states for the given hstate
            w = pstate_prob
            if self.emission_name_distr[feature] == 'lognormal':
                cdf = lambda x: stats.lognorm.cdf(x, self.emission[feature]['logsigma'][:, hstate], 0,
                                                  np.exp(self.emission[feature]['logmu'][:, hstate]))
            elif self.emission_name_distr[feature] == 'normal':
                cdf = lambda x: stats.norm.cdf(x, self.emission[feature]['mu'][:, hstate],
                                               self.emission[feature]['sigma'][:, hstate])
        else:
            # Both states given: a single component with unit weight
            w = 1
            if self.emission_name_distr[feature] == 'lognormal':
                cdf = lambda x: stats.lognorm.cdf(x, self.emission[feature]['logsigma'][pstate_idx, hstate], 0,
                                                  np.exp(self.emission[feature]['logmu'][pstate_idx, hstate]))
            elif self.emission_name_distr[feature] == 'normal':
                cdf = lambda x: stats.norm.cdf(x, self.emission[feature]['mu'][pstate_idx, hstate],
                                               self.emission[feature]['sigma'][pstate_idx, hstate])
        def fn(x):
            # Weighted sum of component CDFs; handles scalars and arrays
            if np.isscalar(x):
                p = np.sum(w * cdf(x))
            else:
                x = np.array(x)
                p = np.zeros(len(x))
                for i, xi in enumerate(x):
                    p[i] = np.sum(w * cdf(xi))
            return p
        return fn
def params(self, pstates=None):
if pstates is None:
pstates = [None] + sorted(
set(self.er.values())) # TODO: self.e caches any unknown value, maybe it shouldn't?
params = []
# emission parameters
for hstate, pstate_label in product(range(self.n_hidden_states), pstates):
for feature, distr in zip(self.emission_name, self.emission_distr):
for feature_param in _DISTRIBUTIONS[distr]:
params.append(self.emission[feature][feature_param][self.e[pstate_label], hstate])
# transition parameters, diagonals only assuming 2 state
for hstate, pstate_label in product(range(self.n_hidden_states), pstates):
params.append(self.transmat[self.e[pstate_label], self.e[pstate_label], hstate, hstate])
return np.array(params)
|
vmonaco/pohmm | pohmm/pohmm.py | Pohmm.fit | python | def fit(self, obs, pstates, unique_pstates=None):
obs = [np.array(o) for o in obs]
pstates = [np.array(p) for p in pstates]
# List or array of observation sequences
assert len(obs) == len(pstates)
assert obs[0].ndim == 2
assert pstates[0].ndim == 1
if unique_pstates is not None:
self._init_pstates(unique_pstates)
else:
self._init_pstates(list(set(np.concatenate(pstates))))
# Map the partial states to a unique index
pstates_idx = [np.array([self.e[p] for p in seq]) for seq in pstates]
if self.init_method == 'rand':
self._init_random()
elif self.init_method == 'obs':
self._init_from_obs(obs, pstates_idx)
self._init_pstate_freqs(pstates_idx)
self._smooth()
logprob = []
for i in range(self.max_iter):
# Expectation step
stats = self._initialize_sufficient_statistics()
curr_logprob = 0
for obs_i, pstates_idx_i in zip(obs, pstates_idx):
framelogprob = self._compute_log_likelihood(obs_i, pstates_idx_i)
lpr, fwdlattice = self._do_forward_pass(framelogprob, pstates_idx_i)
bwdlattice = self._do_backward_pass(framelogprob, pstates_idx_i)
gamma = fwdlattice + bwdlattice
posteriors = np.exp(gamma.T - logsumexp(gamma, axis=1)).T
curr_logprob += lpr
self._accumulate_sufficient_statistics(stats, obs_i,
pstates_idx_i, framelogprob, posteriors, fwdlattice, bwdlattice)
logprob.append(curr_logprob)
self.logprob_ = curr_logprob
# Check for convergence.
self.n_iter_performed_ = i
if i > 0:
self.logprob_delta = logprob[-1] - logprob[-2]
if self.logprob_delta < self.thresh:
break
# Maximization step
self._do_mstep(stats)
# Mix the parameters
self._smooth()
return self | Estimate model parameters. | train | https://github.com/vmonaco/pohmm/blob/c00f8a62d3005a171d424549a55d46c421859ae9/pohmm/pohmm.py#L671-L731 | [
"def logsumexp(arr, axis=0):\n \"\"\"\n Computes the sum of arr assuming arr is in the log domain.\n\n Returns log(sum(exp(arr))) while minimizing the possibility of\n over/underflow.\n \"\"\"\n arr = np.rollaxis(arr, axis)\n # Use the max to normalize, as with the log this is what accumulates\n # the less errors\n vmax = arr.max(axis=0)\n out = np.log(np.sum(np.exp(arr - vmax), axis=0))\n out += vmax\n return out\n",
"def _compute_log_likelihood(self, obs, pstates_idx):\n\n q = np.zeros(shape=(len(obs), self.n_hidden_states, self.n_features))\n\n for col, (feature_name, feature_distr) in enumerate(zip(self.emission_name, self.emission_distr)):\n if feature_distr == 'normal':\n mu = self.emission[feature_name]['mu'][pstates_idx]\n sigma = self.emission[feature_name]['sigma'][pstates_idx]\n for j in range(self.n_hidden_states):\n q[:, j, col] = np.log(\n np.maximum(MIN_PROBA, stats.norm.pdf(obs[:, col], loc=mu[:, j], scale=sigma[:, j])))\n if feature_distr == 'lognormal':\n logmu = self.emission[feature_name]['logmu'][pstates_idx]\n logsigma = self.emission[feature_name]['logsigma'][pstates_idx]\n for j in range(self.n_hidden_states):\n q[:, j, col] = np.log(np.maximum(MIN_PROBA,\n stats.lognorm.pdf(obs[:, col], logsigma[:, j], loc=0,\n scale=np.exp(logmu[:, j]))))\n\n q = q.sum(axis=2)\n return q\n",
"def _init_pstates(self, unique_pstates):\n # Map events to a unique index. The unknown p-state is at idx 0\n self.e = defaultdict(int)\n self.e.update(dict(zip(np.sort(unique_pstates), range(1, len(unique_pstates) + 1))))\n self.er = {v: k for k, v in self.e.items()} # Reverse lookup\n self.er[0] = UNKNOWN_PSTATE\n self.n_partial_states = len(self.e.keys()) + 1 # Add one for the unknown state\n return\n",
"def _init_pstate_freqs(self, pstates_idx):\n # Partial state frequencies\n self.pstate_freq = Counter([idx for seq in pstates_idx for idx in seq])\n self.pstate_trans_freq = Counter([(idx1, idx2) for seq in pstates_idx for idx1, idx2 in zip(seq[:-1], seq[1:])])\n\n # Store freqs for the meta state\n self.pstate_freq[0] = len(np.concatenate(pstates_idx))\n\n for seq in pstates_idx:\n self.pstate_startprob[seq[0]] += 1\n self.pstate_steadyprob += np.bincount(seq, minlength=self.n_partial_states)\n for idx1, idx2 in zip(seq[:-1], seq[1:]):\n self.pstate_trans_freq[(0, 0)] += 1\n self.pstate_trans_freq[(idx1, 0)] += 1\n self.pstate_trans_freq[(0, idx2)] += 1\n\n self.pstate_transmat[idx1, idx2] += 1\n\n # TODO: separate probas from freqs\n # Normalize to get the probabilities, ignore the meta state at idx 0\n self.pstate_startprob[1:] = normalize(self.pstate_startprob[1:])\n self.pstate_transmat[1:, 1:] = normalize(self.pstate_transmat[1:, 1:], axis=1)\n self.pstate_steadyprob[1:] = normalize(self.pstate_steadyprob[1:])\n\n return\n",
"def _init_from_obs(self, obs, pstates_idx):\n # Partial state probabilities\n self.pstate_startprob = np.zeros(self.n_partial_states)\n self.pstate_transmat = np.zeros((self.n_partial_states, self.n_partial_states))\n self.pstate_steadyprob = np.zeros(self.n_partial_states)\n\n # obs should be (N*T, n_features)\n # N is the number of samples\n # T is the size of each sample \n obs = np.concatenate(obs)\n pstates_idx = np.concatenate(pstates_idx)\n\n # Initialize starting and transition probas\n self.startprob = np.ones(shape=(self.n_partial_states, self.n_hidden_states)) / self.n_hidden_states\n self.steadyprob = np.ones(shape=(self.n_partial_states, self.n_hidden_states)) / self.n_hidden_states\n self.transmat = np.ones(shape=(self.n_partial_states, self.n_partial_states, self.n_hidden_states,\n self.n_hidden_states)) / self.n_hidden_states\n\n # Initialize emission parameters\n # Hidden states are ordered by the first feature\n feature1 = self.emission_name[0]\n for col, (feature_name, feature_distr) in enumerate(zip(self.emission_name, self.emission_distr)):\n if feature_distr == 'normal':\n self.emission[feature_name]['mu'] = np.zeros(shape=(self.n_partial_states, self.n_hidden_states))\n self.emission[feature_name]['sigma'] = np.zeros(shape=(self.n_partial_states, self.n_hidden_states))\n for idx in range(1, self.n_partial_states):\n idx_pstate = (pstates_idx == idx)\n\n if not np.any(idx_pstate):\n idx_pstate = np.arange(len(pstates_idx))\n\n if feature_name == feature1:\n self.emission[feature_name]['sigma'][idx, :] = np.maximum(obs[idx_pstate, col].std(), MIN_PROBA)\n self.emission[feature_name]['mu'][idx] = obs[idx_pstate, col].mean() + \\\n obs[:, col].std() * np.linspace(\n -self.init_spread, self.init_spread,\n self.n_hidden_states)\n else:\n self.emission[feature_name]['sigma'][idx, :] = np.maximum(obs[idx_pstate, col].std(), MIN_PROBA)\n self.emission[feature_name]['mu'][idx, :] = obs[idx_pstate, col].mean()\n\n if feature_distr == 'lognormal':\n 
self.emission[feature_name]['logmu'] = np.zeros(shape=(self.n_partial_states, self.n_hidden_states))\n self.emission[feature_name]['logsigma'] = np.zeros(shape=(self.n_partial_states, self.n_hidden_states))\n\n for idx in range(1, self.n_partial_states):\n idx_pstate = (pstates_idx == idx)\n\n if not np.any(idx_pstate):\n idx_pstate = np.arange(len(pstates_idx))\n\n if feature_name == feature1:\n self.emission[feature_name]['logsigma'][idx, :] = np.maximum(np.log(obs[idx_pstate, col]).std(),\n MIN_PROBA)\n self.emission[feature_name]['logmu'][idx] = np.log(obs[idx_pstate, col]).mean() + np.log(\n obs[:, col]).std() * np.linspace(-self.init_spread, self.init_spread,\n self.n_hidden_states)\n else:\n self.emission[feature_name]['logsigma'][idx, :] = np.maximum(np.log(obs[idx_pstate, col]).std(),\n MIN_PROBA)\n self.emission[feature_name]['logmu'][idx] = np.log(obs[idx_pstate, col]).mean()\n\n return\n",
"def _init_random(self, random_state=None):\n\n if random_state is None:\n random_state = self.random_state\n random_state = check_random_state(random_state)\n\n self.pstate_startprob = np.zeros(self.n_partial_states)\n self.pstate_transmat = np.zeros((self.n_partial_states, self.n_partial_states))\n self.pstate_steadyprob = np.zeros(self.n_partial_states)\n\n self.pstate_startprob[1:] = gen_stochastic_matrix(size=self.n_partial_states - 1, random_state=random_state)\n self.pstate_transmat[1:, 1:] = gen_stochastic_matrix(\n size=(self.n_partial_states - 1, self.n_partial_states - 1), random_state=random_state)\n self.pstate_steadyprob[1:] = steadystate(self.pstate_transmat[1:, 1:])\n\n self.startprob = gen_stochastic_matrix(size=(self.n_partial_states, self.n_hidden_states),\n random_state=random_state)\n\n transmat = np.zeros((self.n_partial_states, self.n_partial_states, self.n_hidden_states, self.n_hidden_states))\n for i, j in product(range(1, self.n_partial_states), range(1, self.n_partial_states)):\n transmat[i, j] = gen_stochastic_matrix(size=(self.n_hidden_states, self.n_hidden_states),\n random_state=random_state)\n self.transmat = normalize(transmat, axis=3)\n\n # Initialize emission parameters\n for feature_name, feature_distr in zip(self.emission_name, self.emission_distr):\n if feature_distr == 'normal':\n self.emission[feature_name]['mu'] = random_state.uniform(*_RANDINIT[feature_distr]['mu'], size=(\n self.n_partial_states, self.n_hidden_states))\n self.emission[feature_name]['sigma'] = random_state.uniform(*_RANDINIT[feature_distr]['sigma'], size=(\n self.n_partial_states, self.n_hidden_states))\n if feature_distr == 'lognormal':\n self.emission[feature_name]['logmu'] = random_state.uniform(*_RANDINIT[feature_distr]['logmu'], size=(\n self.n_partial_states, self.n_hidden_states))\n self.emission[feature_name]['logsigma'] = random_state.uniform(*_RANDINIT[feature_distr]['logsigma'],\n size=(self.n_partial_states,\n self.n_hidden_states))\n\n if 
self.emission_distr[0] == 'normal':\n self.emission[self.emission_name[0]]['mu'] = np.sort(self.emission[self.emission_name[0]]['mu'], axis=1)\n elif self.emission_distr[0] == 'lognormal':\n self.emission[self.emission_name[0]]['logmu'] = np.sort(self.emission[self.emission_name[0]]['logmu'],\n axis=1)\n\n return\n",
"def _smooth(self):\n self._compute_marginals()\n\n startprob = self.startprob\n for j in range(1, self.n_partial_states):\n if 'freq' == self.smoothing['startprob']:\n w_ = 1 / (1 + self.pstate_freq[j])\n w_j = 1 - w_\n elif 'proba' == self.smoothing['startprob']:\n w_j = self.pstate_steadyprob[j]\n w_ = 1 - w_j\n elif 'exp' == self.smoothing['startprob']:\n w_ = np.exp(-self.pstate_freq[j])\n w_j = 1 - w_\n elif 'fixed' == self.smoothing['startprob']:\n w_ = 1\n w_j = 0\n elif self.smoothing['startprob'] is None:\n w_ = 0\n w_j = 1\n else:\n raise Exception('Wrong smoothing for startprob: ' + self.smoothing['startprob'])\n startprob[j] = w_j * self.startprob[j] + w_ * self.startprob[0]\n self.startprob = startprob\n\n transmat = self.transmat\n for i, j in product(range(1, self.n_partial_states), range(1, self.n_partial_states)):\n if 'freq' == self.smoothing['transmat']:\n w_i0 = 1 / (1 + self.pstate_trans_freq[i, j] + self.pstate_trans_freq[0, j])\n w_0j = 1 / (1 + self.pstate_trans_freq[i, j] + self.pstate_trans_freq[i, 0])\n w_ij = 1 - (w_i0 + w_0j)\n w_ = 0\n elif 'proba' == self.smoothing['transmat']:\n denom = self.pstate_transmat[i, j] + self.pstate_transmat[i, :].sum() + self.pstate_transmat[:, j].sum()\n w_i0 = self.pstate_transmat[i, :].sum() / denom\n w_0j = self.pstate_transmat[:, j].sum() / denom\n w_ij = self.pstate_transmat[i, j] / denom\n w_ = 0\n elif 'exp' == self.smoothing['transmat']:\n w_i0 = self.pstate_trans_freq[i, 0] * np.exp(-self.pstate_trans_freq[i, j])\n w_0j = self.pstate_trans_freq[0, j] * np.exp(-self.pstate_trans_freq[i, j])\n w_ = self.pstate_trans_freq[0, 0] * np.exp(\n -(self.pstate_trans_freq[i, 0] + self.pstate_trans_freq[0, j]))\n w_ij = self.pstate_trans_freq[i, j]\n w_ij, w_i0, w_0j, w_ = normalize(np.array([w_ij, w_i0, w_0j, w_]))\n elif 'fixed' == self.smoothing['transmat']:\n w_i0 = 0\n w_0j = 0\n w_ = 1\n w_ij = 0\n elif self.smoothing['transmat'] is None:\n w_i0 = 0\n w_0j = 0\n w_ = 0\n w_ij = 1\n else:\n raise 
Exception('Wrong smoothing for transmat: ' + self.smoothing['transmat'])\n\n assert (w_i0 + w_0j + w_ + w_ij) - 1 < TOLERANCE\n transmat[i, j] = w_ij * self.transmat[i, j] + w_i0 * self.transmat[i, 0] + w_0j * self.transmat[0, j] + w_ * \\\n self.transmat[\n 0, 0]\n self.transmat = transmat\n\n assert np.all(self.startprob.sum(axis=1) - 1 < TOLERANCE)\n assert np.all(self.steadyprob.sum(axis=1) - 1 < TOLERANCE)\n assert np.all(self.transmat.sum(axis=3) - 1 < TOLERANCE)\n\n for feature_name, feature_distr in zip(self.emission_name, self.emission_distr):\n for param in _DISTRIBUTIONS[feature_distr]:\n key = '%s:%s' % (feature_name, param)\n for j in range(1, self.n_partial_states):\n if 'freq' == self.smoothing[key]:\n w_ = 1 / (1 + self.pstate_freq[j])\n w_j = 1 - w_\n elif 'proba' == self.smoothing[key]:\n w_j = self.pstate_steadyprob[j]\n w_ = 1 - w_j\n elif 'exp' == self.smoothing[key]:\n w_ = np.exp(-self.pstate_freq[j])\n w_j = 1 - w_\n elif 'fixed' == self.smoothing[key]:\n w_ = 1\n w_j = 0\n elif self.smoothing[key] is None:\n w_ = 0\n w_j = 1\n else:\n raise Exception('Wrong smoothing for ' + key)\n self.emission[feature_name][param][j] = w_j * self.emission[feature_name][param][j] + w_ * \\\n self.emission[\n feature_name][\n param][0]\n\n return\n",
"def _initialize_sufficient_statistics(self):\n stats = {\n 'nobs': 0,\n 'post': np.zeros((self.n_partial_states, self.n_hidden_states)),\n 'obs': np.zeros((self.n_partial_states, self.n_hidden_states, self.n_features)),\n 'obs**2': np.zeros((self.n_partial_states, self.n_hidden_states, self.n_features)),\n 'lnobs': np.zeros((self.n_partial_states, self.n_hidden_states, self.n_features)),\n 'lnobs**2': np.zeros((self.n_partial_states, self.n_hidden_states, self.n_features)),\n 'start': np.zeros((self.n_partial_states, self.n_hidden_states)),\n 'steady': np.zeros((self.n_partial_states, self.n_hidden_states)),\n 'trans': np.zeros(\n (self.n_partial_states, self.n_partial_states, self.n_hidden_states, self.n_hidden_states))\n }\n\n return stats\n",
"def _accumulate_sufficient_statistics(self, stats, obs, pstates_idx, framelogprob,\n posteriors, fwdlattice, bwdlattice):\n stats['nobs'] += 1\n n_observations, n_hidden_states = framelogprob.shape\n\n stats['start'][0] += posteriors[0]\n for i in range(self.n_partial_states):\n if len(np.where(pstates_idx == i)[0]) > 0:\n stats['start'][i] += posteriors[np.where(pstates_idx == i)[0].min()]\n\n if n_observations > 1:\n lneta = np.zeros((n_observations - 1, n_hidden_states, n_hidden_states))\n _hmmc._compute_lneta(n_observations, n_hidden_states, pstates_idx, fwdlattice,\n self._log_transmat, bwdlattice, framelogprob,\n lneta)\n\n for i, j in unique_rows(np.c_[pstates_idx[:-1], pstates_idx[1:]]):\n if ((pstates_idx[:-1] == i) & (pstates_idx[1:] == j)).sum() > 0:\n stats['trans'][i, j] += np.exp(\n logsumexp(lneta[(pstates_idx[:-1] == i) & (pstates_idx[1:] == j)], axis=0))\n\n for i in range(self.n_partial_states):\n stats['post'][i] += posteriors[pstates_idx == i].sum(axis=0)\n stats['obs'][i] += np.dot(posteriors[pstates_idx == i].T, obs[pstates_idx == i])\n stats['obs**2'][i] += np.dot(posteriors[pstates_idx == i].T, obs[pstates_idx == i] ** 2)\n stats['lnobs'][i] += np.dot(posteriors[pstates_idx == i].T, np.log(obs[pstates_idx == i]))\n stats['lnobs**2'][i] += np.dot(posteriors[pstates_idx == i].T, np.log(obs[pstates_idx == i]) ** 2)\n\n return\n",
"def _do_mstep(self, stats):\n self.startprob = normalize(np.maximum(stats['start'], MIN_PROBA), axis=1)\n self.transmat = normalize(np.maximum(stats['trans'], MIN_PROBA), axis=3)\n\n for col, (feature_name, feature_distr) in enumerate(zip(self.emission_name, self.emission_distr)):\n if feature_distr == 'normal':\n denom = np.maximum(stats['post'], MIN_PROBA)\n mu = stats['obs'][:, :, col] / denom\n cv_num = (stats['obs**2'][:, :, col]\n - 2 * mu * stats['obs'][:, :, col]\n + mu ** 2 * denom)\n sigma = np.sqrt(cv_num / denom)\n sigma[np.isnan(sigma)] = MIN_PROBA\n self.emission[feature_name]['mu'] = mu\n self.emission[feature_name]['sigma'] = np.maximum(sigma, MIN_PROBA)\n if feature_distr == 'lognormal':\n denom = np.maximum(stats['post'], MIN_PROBA)\n mu = stats['lnobs'][:, :, col] / denom\n cv_num = (stats['lnobs**2'][:, :, col]\n - 2 * mu * stats['lnobs'][:, :, col]\n + mu ** 2 * denom)\n sigma = np.sqrt(cv_num / denom)\n sigma[np.isnan(sigma)] = MIN_PROBA\n self.emission[feature_name]['logmu'] = mu\n self.emission[feature_name]['logsigma'] = np.maximum(sigma, MIN_PROBA)\n\n return\n",
"def _do_forward_pass(self, framelogprob, event_idx):\n n_observations, n_hidden_states = framelogprob.shape\n fwdlattice = np.zeros((n_observations, n_hidden_states))\n _hmmc._forward(n_observations, n_hidden_states,\n event_idx, self._log_startprob,\n self._log_transmat, framelogprob, fwdlattice)\n\n return logsumexp(fwdlattice[-1]), fwdlattice\n",
"def _do_backward_pass(self, framelogprob, event_idx):\n n_observations, n_hidden_states = framelogprob.shape\n bwdlattice = np.zeros((n_observations, n_hidden_states))\n _hmmc._backward(n_observations, n_hidden_states,\n event_idx, self._log_startprob,\n self._log_transmat, framelogprob, bwdlattice)\n return bwdlattice\n"
] | class Pohmm(object):
"""
Partially observable hidden Markov model
"""
def __init__(self,
n_hidden_states=2,
emissions=['normal'],
max_iter=1000,
thresh=1e-6,
init_method='obs',
init_spread=2,
smoothing=None,
random_state=None):
if type(n_hidden_states) is int:
self.n_hidden_states = n_hidden_states
else:
raise Exception('Wrong type for n_hidden_states. Must be int')
if type(emissions[0]) is tuple:
emission_name, emission_distr = zip(*emissions)
elif type(emissions[0]) is str:
emission_name, emission_distr = np.arange(len(emissions)), emissions
for distr in emission_distr:
if distr not in _DISTRIBUTIONS.keys():
raise ValueError('Emission distribution must be one of', _DISTRIBUTIONS.keys())
self.emission_name = emission_name
self.emission_distr = emission_distr
self.emission_name_distr = dict(zip(emission_name, emission_distr))
self.n_features = len(emissions)
# Set up the emission parameters
# emission: {'feature':{'param': np.array(shape=(n_partial_states, n_hidden_states))}}
self.emission = defaultdict(dict)
for name, distr in zip(self.emission_name, self.emission_distr):
for param in _DISTRIBUTIONS[distr]:
self.emission[name][param] = None
self.emission = dict(self.emission)
assert max_iter >= 0
assert thresh >= 0
self.max_iter = max_iter
self.thresh = thresh
assert init_spread >= 0
self.init_spread = init_spread
if init_method not in _INIT_METHODS:
raise ValueError('init_method must be one of', _INIT_METHODS)
self.init_method = init_method
if smoothing is None:
smoothing = {'transmat': None, 'startprob': None}
for name, distr in zip(self.emission_name, self.emission_distr):
for param in _DISTRIBUTIONS[distr]:
smoothing['%s:%s' % (name, param)] = None
elif type(smoothing) is str:
s = smoothing
smoothing = {'transmat': s, 'startprob': s}
for name, distr in zip(self.emission_name, self.emission_distr):
for param in _DISTRIBUTIONS[distr]:
smoothing['%s:%s' % (name, param)] = s
elif type(smoothing) is dict:
assert 'transmat' in smoothing.keys()
assert 'startprob' in smoothing.keys()
for name, distr in zip(self.emission_name, self.emission_distr):
for param in _DISTRIBUTIONS[distr]:
assert param in smoothing.keys() or '%s:%s' % (name, param) in smoothing.keys()
if param in smoothing.keys() and '%s:%s' % (name, param) not in smoothing.keys():
smoothing['%s:%s' % (name, param)] = smoothing[param]
else:
raise Exception('Wrong type for smoothing. Must be None, str, or dict')
self.smoothing = smoothing
self.random_state = random_state
# Number of unique partial states is unknown until fit
self.n_partial_states = None
# Results after fitting the model
self.logprob_ = None
self.n_iter_performed_ = None
self.logprob_delta_ = None
# Mapping between p-states and a unique index
# Defaults to 0 for unknown or missing p-states
self.e = defaultdict(int)
def _get_startprob(self):
return np.exp(self._log_startprob)
def _set_startprob(self, startprob):
if startprob is None:
startprob = np.ones(shape=(self.n_partial_states, self.n_hidden_states)) / self.n_hidden_states
else:
startprob = np.asarray(startprob, dtype=np.float)
# check if there exists a component whose value is exactly zero
# if so, add a small number and re-normalize
if not np.alltrue(startprob):
startprob = normalize(startprob, axis=1)
if len(startprob) != self.n_partial_states:
raise ValueError('startprob must have length n_partial_states')
if not np.allclose(np.sum(startprob, axis=1), 1.0):
raise ValueError('startprob must sum to 1.0')
self._log_startprob = np.log(np.asarray(startprob).copy())
startprob = property(_get_startprob, _set_startprob)
def _get_steadyprob(self):
return np.exp(self._log_steadyprob)
def _set_steadyprob(self, steadyprob):
if steadyprob is None:
steadyprob = np.ones(shape=(self.n_partial_states, self.n_hidden_states)) / self.n_hidden_states
else:
steadyprob = np.asarray(steadyprob, dtype=np.float)
# check if there exists a component whose value is exactly zero
# if so, add a small number and re-normalize
if not np.alltrue(steadyprob):
steadyprob = normalize(steadyprob, axis=1)
if len(steadyprob) != self.n_partial_states:
raise ValueError('steadyprob must have length n_partial_states')
if not np.allclose(np.sum(steadyprob, axis=1), 1.0):
raise ValueError('steadyprob must sum to 1.0')
self._log_steadyprob = np.log(np.asarray(steadyprob).copy())
steadyprob = property(_get_steadyprob, _set_steadyprob)
def _get_transmat(self):
return np.exp(self._log_transmat)
def _set_transmat(self, transmat):
if transmat is None:
transmat = np.ones(shape=(self.n_partial_states, self.n_partial_states, self.n_hidden_states,
self.n_hidden_states)) / self.n_hidden_states
# check if there exists a component whose value is exactly zero
# if so, add a small number and re-normalize
if not np.alltrue(transmat):
transmat = normalize(transmat, axis=3)
if (np.asarray(transmat).shape
!= (self.n_partial_states, self.n_partial_states, self.n_hidden_states, self.n_hidden_states)):
raise ValueError('transmat must have shape '
'(n_partial_states,n_partial_states,n_hidden_states,n_hidden_states)')
if not np.all(np.allclose(np.sum(transmat, axis=3), 1.0)):
raise ValueError('Rows of transmat must sum to 1.0')
self._log_transmat = np.log(np.asarray(transmat).copy())
underflow_idx = np.isnan(self._log_transmat)
self._log_transmat[underflow_idx] = NEGINF
transmat = property(_get_transmat, _set_transmat)
def _compute_log_likelihood(self, obs, pstates_idx):
q = np.zeros(shape=(len(obs), self.n_hidden_states, self.n_features))
for col, (feature_name, feature_distr) in enumerate(zip(self.emission_name, self.emission_distr)):
if feature_distr == 'normal':
mu = self.emission[feature_name]['mu'][pstates_idx]
sigma = self.emission[feature_name]['sigma'][pstates_idx]
for j in range(self.n_hidden_states):
q[:, j, col] = np.log(
np.maximum(MIN_PROBA, stats.norm.pdf(obs[:, col], loc=mu[:, j], scale=sigma[:, j])))
if feature_distr == 'lognormal':
logmu = self.emission[feature_name]['logmu'][pstates_idx]
logsigma = self.emission[feature_name]['logsigma'][pstates_idx]
for j in range(self.n_hidden_states):
q[:, j, col] = np.log(np.maximum(MIN_PROBA,
stats.lognorm.pdf(obs[:, col], logsigma[:, j], loc=0,
scale=np.exp(logmu[:, j]))))
q = q.sum(axis=2)
return q
def _generate_sample_from_state(self, hidden_state, pstates_idx, random_state=None):
sample = np.zeros(self.n_features)
for col, (feature_name, feature_distr) in enumerate(zip(self.emission_name, self.emission_distr)):
if feature_distr == 'normal':
mu = self.emission[feature_name]['mu'][pstates_idx][hidden_state]
sigma = self.emission[feature_name]['sigma'][pstates_idx][hidden_state]
sample[col] = stats.norm.rvs(loc=mu, scale=sigma, random_state=random_state)
if feature_distr == 'lognormal':
logmu = self.emission[feature_name]['logmu'][pstates_idx][hidden_state]
logsigma = self.emission[feature_name]['logsigma'][pstates_idx][hidden_state]
sample[col] = stats.lognorm.rvs(logsigma, loc=0, scale=np.exp(logmu), random_state=random_state)
return sample
def _init_pstates(self, unique_pstates):
# Map events to a unique index. The unknown p-state is at idx 0
self.e = defaultdict(int)
self.e.update(dict(zip(np.sort(unique_pstates), range(1, len(unique_pstates) + 1))))
self.er = {v: k for k, v in self.e.items()} # Reverse lookup
self.er[0] = UNKNOWN_PSTATE
self.n_partial_states = len(self.e.keys()) + 1 # Add one for the unknown state
return
def _init_pstate_freqs(self, pstates_idx):
# Partial state frequencies
self.pstate_freq = Counter([idx for seq in pstates_idx for idx in seq])
self.pstate_trans_freq = Counter([(idx1, idx2) for seq in pstates_idx for idx1, idx2 in zip(seq[:-1], seq[1:])])
# Store freqs for the meta state
self.pstate_freq[0] = len(np.concatenate(pstates_idx))
for seq in pstates_idx:
self.pstate_startprob[seq[0]] += 1
self.pstate_steadyprob += np.bincount(seq, minlength=self.n_partial_states)
for idx1, idx2 in zip(seq[:-1], seq[1:]):
self.pstate_trans_freq[(0, 0)] += 1
self.pstate_trans_freq[(idx1, 0)] += 1
self.pstate_trans_freq[(0, idx2)] += 1
self.pstate_transmat[idx1, idx2] += 1
# TODO: separate probas from freqs
# Normalize to get the probabilities, ignore the meta state at idx 0
self.pstate_startprob[1:] = normalize(self.pstate_startprob[1:])
self.pstate_transmat[1:, 1:] = normalize(self.pstate_transmat[1:, 1:], axis=1)
self.pstate_steadyprob[1:] = normalize(self.pstate_steadyprob[1:])
return
def _init_from_obs(self, obs, pstates_idx):
# Partial state probabilities
self.pstate_startprob = np.zeros(self.n_partial_states)
self.pstate_transmat = np.zeros((self.n_partial_states, self.n_partial_states))
self.pstate_steadyprob = np.zeros(self.n_partial_states)
# obs should be (N*T, n_features)
# N is the number of samples
# T is the size of each sample
obs = np.concatenate(obs)
pstates_idx = np.concatenate(pstates_idx)
# Initialize starting and transition probas
self.startprob = np.ones(shape=(self.n_partial_states, self.n_hidden_states)) / self.n_hidden_states
self.steadyprob = np.ones(shape=(self.n_partial_states, self.n_hidden_states)) / self.n_hidden_states
self.transmat = np.ones(shape=(self.n_partial_states, self.n_partial_states, self.n_hidden_states,
self.n_hidden_states)) / self.n_hidden_states
# Initialize emission parameters
# Hidden states are ordered by the first feature
feature1 = self.emission_name[0]
for col, (feature_name, feature_distr) in enumerate(zip(self.emission_name, self.emission_distr)):
if feature_distr == 'normal':
self.emission[feature_name]['mu'] = np.zeros(shape=(self.n_partial_states, self.n_hidden_states))
self.emission[feature_name]['sigma'] = np.zeros(shape=(self.n_partial_states, self.n_hidden_states))
for idx in range(1, self.n_partial_states):
idx_pstate = (pstates_idx == idx)
if not np.any(idx_pstate):
idx_pstate = np.arange(len(pstates_idx))
if feature_name == feature1:
self.emission[feature_name]['sigma'][idx, :] = np.maximum(obs[idx_pstate, col].std(), MIN_PROBA)
self.emission[feature_name]['mu'][idx] = obs[idx_pstate, col].mean() + \
obs[:, col].std() * np.linspace(
-self.init_spread, self.init_spread,
self.n_hidden_states)
else:
self.emission[feature_name]['sigma'][idx, :] = np.maximum(obs[idx_pstate, col].std(), MIN_PROBA)
self.emission[feature_name]['mu'][idx, :] = obs[idx_pstate, col].mean()
if feature_distr == 'lognormal':
self.emission[feature_name]['logmu'] = np.zeros(shape=(self.n_partial_states, self.n_hidden_states))
self.emission[feature_name]['logsigma'] = np.zeros(shape=(self.n_partial_states, self.n_hidden_states))
for idx in range(1, self.n_partial_states):
idx_pstate = (pstates_idx == idx)
if not np.any(idx_pstate):
idx_pstate = np.arange(len(pstates_idx))
if feature_name == feature1:
self.emission[feature_name]['logsigma'][idx, :] = np.maximum(np.log(obs[idx_pstate, col]).std(),
MIN_PROBA)
self.emission[feature_name]['logmu'][idx] = np.log(obs[idx_pstate, col]).mean() + np.log(
obs[:, col]).std() * np.linspace(-self.init_spread, self.init_spread,
self.n_hidden_states)
else:
self.emission[feature_name]['logsigma'][idx, :] = np.maximum(np.log(obs[idx_pstate, col]).std(),
MIN_PROBA)
self.emission[feature_name]['logmu'][idx] = np.log(obs[idx_pstate, col]).mean()
return
def _init_random(self, random_state=None):
if random_state is None:
random_state = self.random_state
random_state = check_random_state(random_state)
self.pstate_startprob = np.zeros(self.n_partial_states)
self.pstate_transmat = np.zeros((self.n_partial_states, self.n_partial_states))
self.pstate_steadyprob = np.zeros(self.n_partial_states)
self.pstate_startprob[1:] = gen_stochastic_matrix(size=self.n_partial_states - 1, random_state=random_state)
self.pstate_transmat[1:, 1:] = gen_stochastic_matrix(
size=(self.n_partial_states - 1, self.n_partial_states - 1), random_state=random_state)
self.pstate_steadyprob[1:] = steadystate(self.pstate_transmat[1:, 1:])
self.startprob = gen_stochastic_matrix(size=(self.n_partial_states, self.n_hidden_states),
random_state=random_state)
transmat = np.zeros((self.n_partial_states, self.n_partial_states, self.n_hidden_states, self.n_hidden_states))
for i, j in product(range(1, self.n_partial_states), range(1, self.n_partial_states)):
transmat[i, j] = gen_stochastic_matrix(size=(self.n_hidden_states, self.n_hidden_states),
random_state=random_state)
self.transmat = normalize(transmat, axis=3)
# Initialize emission parameters
for feature_name, feature_distr in zip(self.emission_name, self.emission_distr):
if feature_distr == 'normal':
self.emission[feature_name]['mu'] = random_state.uniform(*_RANDINIT[feature_distr]['mu'], size=(
self.n_partial_states, self.n_hidden_states))
self.emission[feature_name]['sigma'] = random_state.uniform(*_RANDINIT[feature_distr]['sigma'], size=(
self.n_partial_states, self.n_hidden_states))
if feature_distr == 'lognormal':
self.emission[feature_name]['logmu'] = random_state.uniform(*_RANDINIT[feature_distr]['logmu'], size=(
self.n_partial_states, self.n_hidden_states))
self.emission[feature_name]['logsigma'] = random_state.uniform(*_RANDINIT[feature_distr]['logsigma'],
size=(self.n_partial_states,
self.n_hidden_states))
if self.emission_distr[0] == 'normal':
self.emission[self.emission_name[0]]['mu'] = np.sort(self.emission[self.emission_name[0]]['mu'], axis=1)
elif self.emission_distr[0] == 'lognormal':
self.emission[self.emission_name[0]]['logmu'] = np.sort(self.emission[self.emission_name[0]]['logmu'],
axis=1)
return
def _smooth(self):
self._compute_marginals()
startprob = self.startprob
for j in range(1, self.n_partial_states):
if 'freq' == self.smoothing['startprob']:
w_ = 1 / (1 + self.pstate_freq[j])
w_j = 1 - w_
elif 'proba' == self.smoothing['startprob']:
w_j = self.pstate_steadyprob[j]
w_ = 1 - w_j
elif 'exp' == self.smoothing['startprob']:
w_ = np.exp(-self.pstate_freq[j])
w_j = 1 - w_
elif 'fixed' == self.smoothing['startprob']:
w_ = 1
w_j = 0
elif self.smoothing['startprob'] is None:
w_ = 0
w_j = 1
else:
raise Exception('Wrong smoothing for startprob: ' + self.smoothing['startprob'])
startprob[j] = w_j * self.startprob[j] + w_ * self.startprob[0]
self.startprob = startprob
transmat = self.transmat
for i, j in product(range(1, self.n_partial_states), range(1, self.n_partial_states)):
if 'freq' == self.smoothing['transmat']:
w_i0 = 1 / (1 + self.pstate_trans_freq[i, j] + self.pstate_trans_freq[0, j])
w_0j = 1 / (1 + self.pstate_trans_freq[i, j] + self.pstate_trans_freq[i, 0])
w_ij = 1 - (w_i0 + w_0j)
w_ = 0
elif 'proba' == self.smoothing['transmat']:
denom = self.pstate_transmat[i, j] + self.pstate_transmat[i, :].sum() + self.pstate_transmat[:, j].sum()
w_i0 = self.pstate_transmat[i, :].sum() / denom
w_0j = self.pstate_transmat[:, j].sum() / denom
w_ij = self.pstate_transmat[i, j] / denom
w_ = 0
elif 'exp' == self.smoothing['transmat']:
w_i0 = self.pstate_trans_freq[i, 0] * np.exp(-self.pstate_trans_freq[i, j])
w_0j = self.pstate_trans_freq[0, j] * np.exp(-self.pstate_trans_freq[i, j])
w_ = self.pstate_trans_freq[0, 0] * np.exp(
-(self.pstate_trans_freq[i, 0] + self.pstate_trans_freq[0, j]))
w_ij = self.pstate_trans_freq[i, j]
w_ij, w_i0, w_0j, w_ = normalize(np.array([w_ij, w_i0, w_0j, w_]))
elif 'fixed' == self.smoothing['transmat']:
w_i0 = 0
w_0j = 0
w_ = 1
w_ij = 0
elif self.smoothing['transmat'] is None:
w_i0 = 0
w_0j = 0
w_ = 0
w_ij = 1
else:
raise Exception('Wrong smoothing for transmat: ' + self.smoothing['transmat'])
assert (w_i0 + w_0j + w_ + w_ij) - 1 < TOLERANCE
transmat[i, j] = w_ij * self.transmat[i, j] + w_i0 * self.transmat[i, 0] + w_0j * self.transmat[0, j] + w_ * \
self.transmat[
0, 0]
self.transmat = transmat
assert np.all(self.startprob.sum(axis=1) - 1 < TOLERANCE)
assert np.all(self.steadyprob.sum(axis=1) - 1 < TOLERANCE)
assert np.all(self.transmat.sum(axis=3) - 1 < TOLERANCE)
for feature_name, feature_distr in zip(self.emission_name, self.emission_distr):
for param in _DISTRIBUTIONS[feature_distr]:
key = '%s:%s' % (feature_name, param)
for j in range(1, self.n_partial_states):
if 'freq' == self.smoothing[key]:
w_ = 1 / (1 + self.pstate_freq[j])
w_j = 1 - w_
elif 'proba' == self.smoothing[key]:
w_j = self.pstate_steadyprob[j]
w_ = 1 - w_j
elif 'exp' == self.smoothing[key]:
w_ = np.exp(-self.pstate_freq[j])
w_j = 1 - w_
elif 'fixed' == self.smoothing[key]:
w_ = 1
w_j = 0
elif self.smoothing[key] is None:
w_ = 0
w_j = 1
else:
raise Exception('Wrong smoothing for ' + key)
self.emission[feature_name][param][j] = w_j * self.emission[feature_name][param][j] + w_ * \
self.emission[
feature_name][
param][0]
return
def _initialize_sufficient_statistics(self):
stats = {
'nobs': 0,
'post': np.zeros((self.n_partial_states, self.n_hidden_states)),
'obs': np.zeros((self.n_partial_states, self.n_hidden_states, self.n_features)),
'obs**2': np.zeros((self.n_partial_states, self.n_hidden_states, self.n_features)),
'lnobs': np.zeros((self.n_partial_states, self.n_hidden_states, self.n_features)),
'lnobs**2': np.zeros((self.n_partial_states, self.n_hidden_states, self.n_features)),
'start': np.zeros((self.n_partial_states, self.n_hidden_states)),
'steady': np.zeros((self.n_partial_states, self.n_hidden_states)),
'trans': np.zeros(
(self.n_partial_states, self.n_partial_states, self.n_hidden_states, self.n_hidden_states))
}
return stats
def _accumulate_sufficient_statistics(self, stats, obs, pstates_idx, framelogprob,
posteriors, fwdlattice, bwdlattice):
stats['nobs'] += 1
n_observations, n_hidden_states = framelogprob.shape
stats['start'][0] += posteriors[0]
for i in range(self.n_partial_states):
if len(np.where(pstates_idx == i)[0]) > 0:
stats['start'][i] += posteriors[np.where(pstates_idx == i)[0].min()]
if n_observations > 1:
lneta = np.zeros((n_observations - 1, n_hidden_states, n_hidden_states))
_hmmc._compute_lneta(n_observations, n_hidden_states, pstates_idx, fwdlattice,
self._log_transmat, bwdlattice, framelogprob,
lneta)
for i, j in unique_rows(np.c_[pstates_idx[:-1], pstates_idx[1:]]):
if ((pstates_idx[:-1] == i) & (pstates_idx[1:] == j)).sum() > 0:
stats['trans'][i, j] += np.exp(
logsumexp(lneta[(pstates_idx[:-1] == i) & (pstates_idx[1:] == j)], axis=0))
for i in range(self.n_partial_states):
stats['post'][i] += posteriors[pstates_idx == i].sum(axis=0)
stats['obs'][i] += np.dot(posteriors[pstates_idx == i].T, obs[pstates_idx == i])
stats['obs**2'][i] += np.dot(posteriors[pstates_idx == i].T, obs[pstates_idx == i] ** 2)
stats['lnobs'][i] += np.dot(posteriors[pstates_idx == i].T, np.log(obs[pstates_idx == i]))
stats['lnobs**2'][i] += np.dot(posteriors[pstates_idx == i].T, np.log(obs[pstates_idx == i]) ** 2)
return
def _compute_marginals(self):
# TODO: cythonize some of this
# Start prob, weighted by p-state start probs
self.startprob[0] = (self.pstate_startprob[1:, np.newaxis] * self.startprob[1:]).sum(axis=0)
# Use the p-state transmat and transmat to get the full transmat
full_transmat = ph2full(self.pstate_transmat[1:, 1:], self.transmat[1:, 1:])
full_steadyprob = steadystate(full_transmat)
# Steady state probas are determined by the full trans mat, need to be updated
steadyprob = np.zeros(shape=(self.n_partial_states, self.n_hidden_states))
steadyprob[0] = full_steadyprob.reshape(-1, self.n_hidden_states).sum(axis=0)
for i in range(self.n_partial_states - 1):
steadyprob[i + 1] = normalize(
full_steadyprob[i * self.n_hidden_states:i * self.n_hidden_states + self.n_hidden_states])
self.steadyprob = steadyprob
# Update the transations to/from the marginal state
transmat = self.transmat
# Group the hidden states within each partial state
for hidx1, hidx2 in product(range(self.n_hidden_states), range(self.n_hidden_states)):
transmat[0, 0][hidx1, hidx2] = full_transmat[hidx1::self.n_hidden_states, hidx2::self.n_hidden_states].sum()
for pidx in range(self.n_partial_states - 1):
transmat[pidx + 1, 0][hidx1, hidx2] = full_transmat[pidx * self.n_hidden_states + hidx1,
hidx2::self.n_hidden_states].sum()
transmat[0, pidx + 1][hidx1, hidx2] = full_transmat[hidx1::self.n_hidden_states,
pidx * self.n_hidden_states + hidx2].sum()
self.transmat = normalize(transmat, axis=3)
pweights = self.pstate_steadyprob[1:, np.newaxis]
# Update emission parameters
for feature_name, feature_distr in zip(self.emission_name, self.emission_distr):
if feature_distr == 'normal':
# Marginal state is a mixture of normals
mu = self.emission[feature_name]['mu'][1:, :]
sigma = self.emission[feature_name]['sigma'][1:, :]
# Weighted mean and var
mu_0 = (pweights * mu).sum(axis=0)
self.emission[feature_name]['mu'][0, :] = mu_0
self.emission[feature_name]['sigma'][0, :] = np.sqrt(
(pweights * ((mu - mu_0) ** 2 + sigma ** 2)).sum(axis=0))
if feature_distr == 'lognormal':
# Marginal state is a mixture of normals
mu = self.emission[feature_name]['logmu'][1:, :]
sigma = self.emission[feature_name]['logsigma'][1:, :]
# Weighted mean and var
mu_0 = (pweights * mu).sum(axis=0)
self.emission[feature_name]['logmu'][0, :] = mu_0
self.emission[feature_name]['logsigma'][0, :] = np.sqrt(
(pweights * ((mu - mu_0) ** 2 + sigma ** 2)).sum(axis=0))
return
def _do_mstep(self, stats):
self.startprob = normalize(np.maximum(stats['start'], MIN_PROBA), axis=1)
self.transmat = normalize(np.maximum(stats['trans'], MIN_PROBA), axis=3)
for col, (feature_name, feature_distr) in enumerate(zip(self.emission_name, self.emission_distr)):
if feature_distr == 'normal':
denom = np.maximum(stats['post'], MIN_PROBA)
mu = stats['obs'][:, :, col] / denom
cv_num = (stats['obs**2'][:, :, col]
- 2 * mu * stats['obs'][:, :, col]
+ mu ** 2 * denom)
sigma = np.sqrt(cv_num / denom)
sigma[np.isnan(sigma)] = MIN_PROBA
self.emission[feature_name]['mu'] = mu
self.emission[feature_name]['sigma'] = np.maximum(sigma, MIN_PROBA)
if feature_distr == 'lognormal':
denom = np.maximum(stats['post'], MIN_PROBA)
mu = stats['lnobs'][:, :, col] / denom
cv_num = (stats['lnobs**2'][:, :, col]
- 2 * mu * stats['lnobs'][:, :, col]
+ mu ** 2 * denom)
sigma = np.sqrt(cv_num / denom)
sigma[np.isnan(sigma)] = MIN_PROBA
self.emission[feature_name]['logmu'] = mu
self.emission[feature_name]['logsigma'] = np.maximum(sigma, MIN_PROBA)
return
def _do_forward_pass(self, framelogprob, event_idx):
n_observations, n_hidden_states = framelogprob.shape
fwdlattice = np.zeros((n_observations, n_hidden_states))
_hmmc._forward(n_observations, n_hidden_states,
event_idx, self._log_startprob,
self._log_transmat, framelogprob, fwdlattice)
return logsumexp(fwdlattice[-1]), fwdlattice
def _do_backward_pass(self, framelogprob, event_idx):
n_observations, n_hidden_states = framelogprob.shape
bwdlattice = np.zeros((n_observations, n_hidden_states))
_hmmc._backward(n_observations, n_hidden_states,
event_idx, self._log_startprob,
self._log_transmat, framelogprob, bwdlattice)
return bwdlattice
def _do_viterbi_pass(self, framelogprob, event_idx):
n_observations, n_components = framelogprob.shape
state_sequence, logprob = _hmmc._viterbi(
n_observations, n_components,
event_idx, self._log_startprob,
self._log_transmat, framelogprob)
return logprob, state_sequence
def rand(self, unique_pstates, random_state=None):
"""
Randomize the POHMM parameters
"""
self._init_pstates(unique_pstates)
self._init_random(random_state=random_state)
self._compute_marginals()
return self
def score(self, obs, pstates):
"""
Compute the log probability under the model.
"""
pstates_idx = np.array([self.e[p] for p in pstates])
framelogprob = self._compute_log_likelihood(obs, pstates_idx)
logprob, _ = self._do_forward_pass(framelogprob, pstates_idx)
return logprob
def score_events(self, obs, pstates):
"""
Compute the log probability of each event under the model.
"""
pstates_idx = np.array([self.e[p] for p in pstates])
framelogprob = self._compute_log_likelihood(obs, pstates_idx)
_, fwdlattice = self._do_forward_pass(framelogprob, pstates_idx)
L = logsumexp(fwdlattice, axis=1)
return np.concatenate([L[[0]], np.diff(L)])
def predict_states(self, obs, pstates):
pstates_idx = np.array([self.e[p] for p in pstates])
framelogprob = self._compute_log_likelihood(obs, pstates_idx)
viterbi_logprob, state_sequence = self._do_viterbi_pass(framelogprob, pstates_idx)
return viterbi_logprob, state_sequence
def predict(self, obs, pstates, next_pstate=None):
"""
Predict the next observation
"""
assert len(obs) == len(pstates)
pstates_idx = np.array([self.e[ei] for ei in pstates])
next_pstate_idx = self.e[next_pstate]
if len(obs) == 0:
# No history, use the starting probas
next_hstate_prob = self.startprob[next_pstate_idx]
else:
# With a history, determine the hidden state posteriors using
# the last posteriors and transition matrix
framelogprob = self._compute_log_likelihood(obs, pstates_idx)
_, fwdlattice = self._do_forward_pass(framelogprob, pstates_idx)
next_hstate_prob = np.zeros(self.n_hidden_states)
alpha_n = fwdlattice[-1]
vmax = alpha_n.max(axis=0)
alpha_n = np.exp(alpha_n - vmax)
alpha_n = alpha_n / alpha_n.sum()
trans = self.transmat[pstates_idx[-1], next_pstate_idx]
for i in range(self.n_hidden_states):
next_hstate_prob[i] = np.sum([alpha_n[j] * trans[j, i] for j in range(self.n_hidden_states)])
assert next_hstate_prob.sum() - 1 < TOLERANCE
# Make the prediction
prediction = np.array(
[self.expected_value(feature, pstate=next_pstate, hstate_prob=next_hstate_prob) for feature in
self.emission_name])
# next_hstate = np.argmax(next_hstate_prob)
# prediction = np.array(
# [self.expected_value(feature, pstate=next_pstate, hstate=next_hstate) for feature in
# self.emission_name])
return prediction
def gen_pstates_idx(self, n, random_state=None):
if random_state is None:
random_state = self.random_state
random_state = check_random_state(random_state)
startprob_cdf = np.cumsum(self.pstate_startprob)
transmat_cdf = np.cumsum(self.pstate_transmat, 1)
# Initial state.
rand = random_state.rand()
curr_pstate = (startprob_cdf > rand).argmax()
pstates = [curr_pstate]
for _ in range(1, n):
rand = random_state.rand()
curr_pstate = (transmat_cdf[curr_pstate] > rand).argmax()
pstates.append(curr_pstate)
return np.array(pstates, dtype=int)
def sample(self, pstates=None, n_obs=None, random_state=None):
"""
"""
random_state = check_random_state(random_state)
if pstates is None and n_obs is None:
raise Exception('Must provide either pstates or n_obs')
if pstates is not None and n_obs is not None:
raise Exception('Must provide either pstates or n_obs but not both')
gen_pstates = False
rand = random_state.rand()
if pstates is None:
gen_pstates = True
pstartprob_cdf = np.cumsum(self.pstate_startprob)
ptransmat_cdf = np.cumsum(self.pstate_transmat, 1)
# Initial pstate
currpstate = (pstartprob_cdf > rand).argmax()
pstates_idx = [currpstate]
pstates = [self.er[currpstate]]
else:
n_obs = len(pstates)
pstates_idx = np.array([self.e[p] for p in pstates])
startprob_pdf = self.startprob[pstates_idx[0]]
startprob_cdf = np.cumsum(startprob_pdf)
transmat_cdf = np.cumsum(self.transmat[0, pstates_idx[0]], 1)
# Initial hidden state
rand = random_state.rand()
currstate = (startprob_cdf > rand).argmax()
hidden_states = [currstate]
obs = [self._generate_sample_from_state(currstate, pstates_idx[0], random_state)]
for i in range(1, n_obs):
rand = random_state.rand()
if gen_pstates:
currpstate = (ptransmat_cdf[currpstate] > rand).argmax()
pstates_idx.append(currpstate)
pstates.append(self.er[currpstate])
transmat_cdf = np.cumsum(self.transmat[pstates_idx[i - 1], pstates_idx[i]], 1)
rand = random_state.rand()
currstate = (transmat_cdf[currstate] > rand).argmax()
hidden_states.append(currstate)
obs.append(self._generate_sample_from_state(currstate, pstates_idx[i], random_state))
return np.array(obs), np.array(pstates), np.array(hidden_states, dtype=int)
def fit_df(self, dfs, pstate_col=PSTATE_COL):
"""
Convenience function to fit a model from a list of dataframes
"""
obs_cols = list(self.emission_name)
obs = [df[df.columns.difference([pstate_col])][obs_cols].values for df in dfs]
pstates = [df[pstate_col].values for df in dfs]
return self.fit(obs, pstates)
def score_df(self, df, pstate_col=PSTATE_COL):
"""
"""
obs_cols = list(self.emission_name)
obs = df[df.columns.difference([pstate_col])][obs_cols].values
pstates = df[pstate_col].values
return self.score(obs, pstates)
def score_events_df(self, df, pstate_col=PSTATE_COL, score_col='score'):
"""
"""
df = df.copy()
obs_cols = list(self.emission_name)
obs = df[df.columns.difference([pstate_col])][obs_cols].values
pstates = df[pstate_col].values
df[score_col] = self.score_events(obs, pstates)
return df
def predict_states_df(self, df, pstate_col=PSTATE_COL, hstate_col=HSTATE_COL):
df = df.copy()
obs_cols = list(self.emission_name)
obs = df[df.columns.difference([pstate_col])][obs_cols].values
pstates = df[pstate_col].values
_, df[hstate_col] = self.predict_states(obs, pstates)
return df
def predict_df(self, df, next_pstate=None, pstate_col=PSTATE_COL):
obs_cols = list(self.emission_name)
obs = df[df.columns.difference([pstate_col])][obs_cols].values
pstates = df[pstate_col].values
return self.predict(obs, pstates, next_pstate)
def sample_df(self, pstates=None, n_obs=None, random_state=None, pstate_col=PSTATE_COL, hstate_col=HSTATE_COL):
"""
Convenience function to generate samples a model and create a dataframe
"""
try:
import pandas as pd
except Exception as e:
raise e
obs, pstates, hstates = self.sample(pstates, n_obs, random_state)
items = []
if pstate_col is not None:
items.append((pstate_col, pstates))
if hstate_col is not None:
items.append((hstate_col, hstates))
items = items + [(self.emission_name[i], obs[:, i]) for i in range(self.n_features)]
df = pd.DataFrame.from_items(items)
return df
def __str__(self):
pstates = sorted(self.e.keys())
sep = '-' * 80 + '\n'
sep2 = '_' * 40 + '\n'
out = 'POHMM\n'
out += 'H-states: %d\n' % self.n_hidden_states
out += 'P-states: (%d) %s\n' % (len(pstates), str(pstates))
out += 'Emission: %s\n' % (self.emission_distr)
out += sep
out += 'Transition probas\n'
out += sep2
out += '. -> .\n%s\n' % str(self.transmat[0, 0])
for pstate in pstates:
out += sep2
out += '%s -> .\n%s\n' % (pstate, str(self.transmat[self.e[pstate], 0]))
out += sep2
out += '. -> %s\n%s\n' % (pstate, str(self.transmat[0, self.e[pstate]]))
for pstate1, pstate2 in product(pstates, pstates):
out += sep2
out += '%s -> %s\n%s\n' % (pstate1, pstate2, str(self.transmat[self.e[pstate1], self.e[pstate2]]))
out += sep
out += 'Starting probas\n'
out += '.: %s\n' % str(self.startprob[0])
for pstate in pstates:
out += '%s: %s\n' % (pstate, str(self.startprob[self.e[pstate]]))
out += sep
out += 'Steady probas\n'
out += '.: %s\n' % str(self.steadyprob[0])
for pstate in pstates:
out += '%s: %s\n' % (pstate, str(self.steadyprob[self.e[pstate]]))
out += sep
out += 'Emissions\n'
for feature_name, feature_distr in zip(self.emission_name, self.emission_distr):
out += sep2
out += 'Feature %s: %s\n' % (feature_name, feature_distr)
for param in _DISTRIBUTIONS[feature_distr]:
out += '.: %s = %s\n' % (param, str(self.emission[feature_name][param][0]))
for pstate in pstates:
out += '%s: %s = %s\n' % (pstate, param, str(self.emission[feature_name][param][self.e[pstate]]))
out += sep
return out
def expected_value(self, feature=None, pstate=None, hstate=None, pstate_prob=None, hstate_prob=None):
"""
Determine the joint maximum likelihood estimate
"""
# Use the first feature by default
if feature is None:
feature = self.emission_name[0]
# Will default to marginal pstate if pstate is unknown or None
pstate_idx = self.e[pstate]
if pstate is not None and pstate_prob is not None:
raise Exception('Must provide either pstate or pstate_proba but not both')
if hstate is not None and hstate_prob is not None:
raise Exception('Must provide either hstate or hstate_proba but not both')
# Marginalize pstate using the steady state probas
if pstate_prob is None:
pstate_prob = self.pstate_steadyprob
# Marginalize hstate using the steady state probas
if hstate_prob is None:
hstate_prob = self.steadyprob[pstate_idx]
if pstate is None and hstate is None:
# Marginalize both pstate and hstate
w = (pstate_prob[:, np.newaxis] * hstate_prob).flatten()
if self.emission_name_distr[feature] == 'lognormal':
return np.sum(w * expected_lognormal(self.emission[feature]['logsigma'].flatten(),
self.emission[feature]['logmu'].flatten()))
elif self.emission_name_distr[feature] == 'normal':
return np.sum(w * expected_normal(self.emission[feature]['mu'].flatten(),
self.emission[feature]['sigma'].flatten()))
elif hstate is None:
# Marginalize hstate
if self.emission_name_distr[feature] == 'lognormal':
return np.sum(hstate_prob * expected_lognormal(self.emission[feature]['logsigma'][pstate_idx, :],
self.emission[feature]['logmu'][pstate_idx, :]))
elif self.emission_name_distr[feature] == 'normal':
return np.sum(hstate_prob * expected_normal(self.emission[feature]['mu'][pstate_idx, :],
self.emission[feature]['sigma'][pstate_idx, :]))
elif pstate is None:
# Marginalize pstate
if self.emission_name_distr[feature] == 'lognormal':
return np.sum(pstate_prob * expected_lognormal(self.emission[feature]['logsigma'][:, hstate],
self.emission[feature]['logmu'][:, hstate]))
elif self.emission_name_distr[feature] == 'normal':
return np.sum(pstate_prob * expected_normal(self.emission[feature]['mu'][:, hstate],
self.emission[feature]['sigma'][:, hstate]))
else:
if self.emission_name_distr[feature] == 'lognormal':
return expected_lognormal(self.emission[feature]['logsigma'][pstate_idx, hstate],
self.emission[feature]['logmu'][pstate_idx, hstate])
elif self.emission_name_distr[feature] == 'normal':
return expected_normal(self.emission[feature]['mu'][pstate_idx, hstate],
self.emission[feature]['sigma'][pstate_idx, hstate])
return
def pdf_fn(self, feature=None, pstate=None, hstate=None, pstate_prob=None, hstate_prob=None):
    """Build a probability density function for one emission feature.

    The returned callable accepts a scalar or a sequence of observation
    values. When pstate and/or hstate are omitted, the density is a mixture
    weighted by the supplied probability vectors or, by default, the
    steady-state probabilities.

    Raises an Exception if both a state and its probability vector are given.
    """
    # Use the first feature by default
    if feature is None:
        feature = self.emission_name[0]
    # Will default to marginal pstate if pstate is unknown or None
    pstate_idx = self.e[pstate]
    if pstate is not None and pstate_prob is not None:
        raise Exception('Must provide either pstate or pstate_proba but not both')
    if hstate is not None and hstate_prob is not None:
        raise Exception('Must provide either hstate or hstate_proba but not both')
    # Marginalize pstate using the steady state probas
    if pstate_prob is None:
        pstate_prob = self.pstate_steadyprob
    # Marginalize hstate using the steady state probas
    if hstate_prob is None:
        hstate_prob = self.steadyprob[pstate_idx]
    if pstate is None and hstate is None:
        # Marginalize both pstate and hstate: weights are the outer product
        # of the pstate and hstate probabilities, flattened to match the
        # flattened parameter arrays below
        w = (pstate_prob[:, np.newaxis] * hstate_prob).flatten()
        if self.emission_name_distr[feature] == 'lognormal':
            pdf = lambda x: stats.lognorm.pdf(x, self.emission[feature]['logsigma'].flatten(), 0,
                                              np.exp(self.emission[feature]['logmu'].flatten()))
        elif self.emission_name_distr[feature] == 'normal':
            pdf = lambda x: stats.norm.pdf(x, self.emission[feature]['mu'].flatten(),
                                           self.emission[feature]['sigma'].flatten())
    elif hstate is None:
        # Marginalize hstate
        w = hstate_prob
        if self.emission_name_distr[feature] == 'lognormal':
            pdf = lambda x: stats.lognorm.pdf(x, self.emission[feature]['logsigma'][pstate_idx, :], 0,
                                              np.exp(self.emission[feature]['logmu'][pstate_idx, :]))
        elif self.emission_name_distr[feature] == 'normal':
            pdf = lambda x: stats.norm.pdf(x, self.emission[feature]['mu'][pstate_idx, :],
                                           self.emission[feature]['sigma'][pstate_idx, :])
    elif pstate is None:
        # Marginalize pstate
        w = pstate_prob
        if self.emission_name_distr[feature] == 'lognormal':
            pdf = lambda x: stats.lognorm.pdf(x, self.emission[feature]['logsigma'][:, hstate], 0,
                                              np.exp(self.emission[feature]['logmu'][:, hstate]))
        elif self.emission_name_distr[feature] == 'normal':
            pdf = lambda x: stats.norm.pdf(x, self.emission[feature]['mu'][:, hstate],
                                           self.emission[feature]['sigma'][:, hstate])
    else:
        # Both states given: single component with unit weight
        w = 1
        if self.emission_name_distr[feature] == 'lognormal':
            pdf = lambda x: stats.lognorm.pdf(x, self.emission[feature]['logsigma'][pstate_idx, hstate], 0,
                                              np.exp(self.emission[feature]['logmu'][pstate_idx, hstate]))
        elif self.emission_name_distr[feature] == 'normal':
            pdf = lambda x: stats.norm.pdf(x, self.emission[feature]['mu'][pstate_idx, hstate],
                                           self.emission[feature]['sigma'][pstate_idx, hstate])

    def fn(x):
        # Weighted sum of component densities; handles scalar or sequence input
        if np.isscalar(x):
            p = np.sum(w * pdf(x))
        else:
            x = np.array(x)
            p = np.zeros(len(x))
            for i, xi in enumerate(x):
                p[i] = np.sum(w * pdf(xi))
        return p

    return fn
def cdf_fn(self, feature=None, pstate=None, hstate=None, pstate_prob=None, hstate_prob=None):
    """Build a cumulative distribution function for one emission feature.

    Mirrors pdf_fn: the returned callable accepts a scalar or a sequence of
    values, and the CDF is a mixture over any omitted pstate/hstate, weighted
    by the supplied probability vectors or the steady-state probabilities.

    Raises an Exception if both a state and its probability vector are given.
    """
    # Use the first feature by default
    if feature is None:
        feature = self.emission_name[0]
    # Will default to marginal pstate if pstate is unknown or None
    pstate_idx = self.e[pstate]
    if pstate is not None and pstate_prob is not None:
        raise Exception('Must provide either pstate or pstate_proba but not both')
    if hstate is not None and hstate_prob is not None:
        raise Exception('Must provide either hstate or hstate_proba but not both')
    # Marginalize pstate using the steady state probas
    if pstate_prob is None:
        pstate_prob = self.pstate_steadyprob
    # Marginalize hstate using the steady state probas
    if hstate_prob is None:
        hstate_prob = self.steadyprob[pstate_idx]
    if pstate is None and hstate is None:
        # Marginalize both pstate and hstate
        w = (pstate_prob[:, np.newaxis] * hstate_prob).flatten()
        if self.emission_name_distr[feature] == 'lognormal':
            cdf = lambda x: stats.lognorm.cdf(x, self.emission[feature]['logsigma'].flatten(), 0,
                                              np.exp(self.emission[feature]['logmu'].flatten()))
        elif self.emission_name_distr[feature] == 'normal':
            cdf = lambda x: stats.norm.cdf(x, self.emission[feature]['mu'].flatten(),
                                           self.emission[feature]['sigma'].flatten())
    elif hstate is None:
        # Marginalize hstate
        w = hstate_prob
        if self.emission_name_distr[feature] == 'lognormal':
            cdf = lambda x: stats.lognorm.cdf(x, self.emission[feature]['logsigma'][pstate_idx, :], 0,
                                              np.exp(self.emission[feature]['logmu'][pstate_idx, :]))
        elif self.emission_name_distr[feature] == 'normal':
            cdf = lambda x: stats.norm.cdf(x, self.emission[feature]['mu'][pstate_idx, :],
                                           self.emission[feature]['sigma'][pstate_idx, :])
    elif pstate is None:
        # Marginalize pstate
        w = pstate_prob
        if self.emission_name_distr[feature] == 'lognormal':
            cdf = lambda x: stats.lognorm.cdf(x, self.emission[feature]['logsigma'][:, hstate], 0,
                                              np.exp(self.emission[feature]['logmu'][:, hstate]))
        elif self.emission_name_distr[feature] == 'normal':
            cdf = lambda x: stats.norm.cdf(x, self.emission[feature]['mu'][:, hstate],
                                           self.emission[feature]['sigma'][:, hstate])
    else:
        # Both states given: single component with unit weight
        w = 1
        if self.emission_name_distr[feature] == 'lognormal':
            cdf = lambda x: stats.lognorm.cdf(x, self.emission[feature]['logsigma'][pstate_idx, hstate], 0,
                                              np.exp(self.emission[feature]['logmu'][pstate_idx, hstate]))
        elif self.emission_name_distr[feature] == 'normal':
            cdf = lambda x: stats.norm.cdf(x, self.emission[feature]['mu'][pstate_idx, hstate],
                                           self.emission[feature]['sigma'][pstate_idx, hstate])

    def fn(x):
        # Weighted sum of component CDFs; handles scalar or sequence input
        if np.isscalar(x):
            p = np.sum(w * cdf(x))
        else:
            x = np.array(x)
            p = np.zeros(len(x))
            for i, xi in enumerate(x):
                p[i] = np.sum(w * cdf(xi))
        return p

    return fn
def params(self, pstates=None):
    """Collect model parameters into one flat array.

    For every (hidden state, pstate label) pair, emits each feature's
    distribution parameters, followed by the diagonal transition
    probabilities for the same pairs.
    """
    if pstates is None:
        pstates = [None] + sorted(
            set(self.er.values()))  # TODO: self.e caches any unknown value, maybe it shouldn't?

    collected = []
    # Emission parameters
    for hidx, plabel in product(range(self.n_hidden_states), pstates):
        pidx = self.e[plabel]
        for name, distr in zip(self.emission_name, self.emission_distr):
            collected.extend(self.emission[name][p][pidx, hidx] for p in _DISTRIBUTIONS[distr])
    # Transition parameters, diagonals only assuming 2 state
    for hidx, plabel in product(range(self.n_hidden_states), pstates):
        pidx = self.e[plabel]
        collected.append(self.transmat[pidx, pidx, hidx, hidx])
    return np.array(collected)
|
def score(self, obs, pstates):
    """Compute the log probability of the observation sequence under the model."""
    idx = np.array([self.e[label] for label in pstates])
    framelogprob = self._compute_log_likelihood(obs, idx)
    total_logprob, _ = self._do_forward_pass(framelogprob, idx)
    return total_logprob
"def _compute_log_likelihood(self, obs, pstates_idx):\n\n q = np.zeros(shape=(len(obs), self.n_hidden_states, self.n_features))\n\n for col, (feature_name, feature_distr) in enumerate(zip(self.emission_name, self.emission_distr)):\n if feature_distr == 'normal':\n mu = self.emission[feature_name]['mu'][pstates_idx]\n sigma = self.emission[feature_name]['sigma'][pstates_idx]\n for j in range(self.n_hidden_states):\n q[:, j, col] = np.log(\n np.maximum(MIN_PROBA, stats.norm.pdf(obs[:, col], loc=mu[:, j], scale=sigma[:, j])))\n if feature_distr == 'lognormal':\n logmu = self.emission[feature_name]['logmu'][pstates_idx]\n logsigma = self.emission[feature_name]['logsigma'][pstates_idx]\n for j in range(self.n_hidden_states):\n q[:, j, col] = np.log(np.maximum(MIN_PROBA,\n stats.lognorm.pdf(obs[:, col], logsigma[:, j], loc=0,\n scale=np.exp(logmu[:, j]))))\n\n q = q.sum(axis=2)\n return q\n",
"def _do_forward_pass(self, framelogprob, event_idx):\n n_observations, n_hidden_states = framelogprob.shape\n fwdlattice = np.zeros((n_observations, n_hidden_states))\n _hmmc._forward(n_observations, n_hidden_states,\n event_idx, self._log_startprob,\n self._log_transmat, framelogprob, fwdlattice)\n\n return logsumexp(fwdlattice[-1]), fwdlattice\n"
] | class Pohmm(object):
"""
Partially observable hidden Markov model
"""
def __init__(self,
             n_hidden_states=2,
             emissions=None,
             max_iter=1000,
             thresh=1e-6,
             init_method='obs',
             init_spread=2,
             smoothing=None,
             random_state=None):
    """Configure a partially observable HMM.

    Parameters
    ----------
    n_hidden_states : int, number of hidden states per partial state.
    emissions : list of distribution names ('normal'/'lognormal') or of
        (feature_name, distribution) tuples, one entry per feature.
        Defaults to a single 'normal' feature.
    max_iter : maximum number of EM iterations.
    thresh : log-likelihood improvement threshold for convergence.
    init_method : 'obs' (initialize from data) or 'rand'.
    init_spread : spread (in std devs) of the initial per-hidden-state means.
    smoothing : None, a strategy name applied to every group, or a dict
        keyed by 'transmat', 'startprob' and '<feature>:<param>'.
    random_state : seed or RandomState used for random initialization.
    """
    # None sentinel instead of a mutable default argument (['normal'] would
    # be shared across all instances); behavior is unchanged.
    if emissions is None:
        emissions = ['normal']

    if type(n_hidden_states) is int:
        self.n_hidden_states = n_hidden_states
    else:
        raise Exception('Wrong type for n_hidden_states. Must be int')

    # Feature names default to 0..n-1 when only distribution names are given
    if type(emissions[0]) is tuple:
        emission_name, emission_distr = zip(*emissions)
    elif type(emissions[0]) is str:
        emission_name, emission_distr = np.arange(len(emissions)), emissions

    for distr in emission_distr:
        if distr not in _DISTRIBUTIONS.keys():
            raise ValueError('Emission distribution must be one of', _DISTRIBUTIONS.keys())

    self.emission_name = emission_name
    self.emission_distr = emission_distr
    self.emission_name_distr = dict(zip(emission_name, emission_distr))
    self.n_features = len(emissions)

    # Set up the emission parameters
    # emission: {'feature':{'param': np.array(shape=(n_partial_states, n_hidden_states))}}
    self.emission = defaultdict(dict)
    for name, distr in zip(self.emission_name, self.emission_distr):
        for param in _DISTRIBUTIONS[distr]:
            self.emission[name][param] = None
    self.emission = dict(self.emission)

    assert max_iter >= 0
    assert thresh >= 0
    self.max_iter = max_iter
    self.thresh = thresh

    assert init_spread >= 0
    self.init_spread = init_spread

    if init_method not in _INIT_METHODS:
        raise ValueError('init_method must be one of', _INIT_METHODS)
    self.init_method = init_method

    # Normalize the smoothing spec into a dict with one entry per group
    if smoothing is None:
        smoothing = {'transmat': None, 'startprob': None}
        for name, distr in zip(self.emission_name, self.emission_distr):
            for param in _DISTRIBUTIONS[distr]:
                smoothing['%s:%s' % (name, param)] = None
    elif type(smoothing) is str:
        s = smoothing
        smoothing = {'transmat': s, 'startprob': s}
        for name, distr in zip(self.emission_name, self.emission_distr):
            for param in _DISTRIBUTIONS[distr]:
                smoothing['%s:%s' % (name, param)] = s
    elif type(smoothing) is dict:
        assert 'transmat' in smoothing.keys()
        assert 'startprob' in smoothing.keys()
        for name, distr in zip(self.emission_name, self.emission_distr):
            for param in _DISTRIBUTIONS[distr]:
                assert param in smoothing.keys() or '%s:%s' % (name, param) in smoothing.keys()
                # A bare param key acts as a default for every feature
                if param in smoothing.keys() and '%s:%s' % (name, param) not in smoothing.keys():
                    smoothing['%s:%s' % (name, param)] = smoothing[param]
    else:
        raise Exception('Wrong type for smoothing. Must be None, str, or dict')
    self.smoothing = smoothing

    self.random_state = random_state

    # Number of unique partial states is unknown until fit
    self.n_partial_states = None

    # Results after fitting the model
    self.logprob_ = None
    self.n_iter_performed_ = None
    self.logprob_delta_ = None

    # Mapping between p-states and a unique index
    # Defaults to 0 for unknown or missing p-states
    self.e = defaultdict(int)
def _get_startprob(self):
    """Starting probabilities, shape (n_partial_states, n_hidden_states)."""
    return np.exp(self._log_startprob)

def _set_startprob(self, startprob):
    """Validate startprob and store it in log space.

    None initializes a uniform distribution over hidden states.
    Raises ValueError on wrong length or rows that do not sum to 1.
    """
    if startprob is None:
        startprob = np.ones(shape=(self.n_partial_states, self.n_hidden_states)) / self.n_hidden_states
    else:
        # np.float was a deprecated alias removed from numpy; use the builtin
        startprob = np.asarray(startprob, dtype=float)

    # check if there exists a component whose value is exactly zero
    # if so, add a small number and re-normalize
    # (np.alltrue was removed from numpy; np.all is the supported spelling)
    if not np.all(startprob):
        startprob = normalize(startprob, axis=1)

    if len(startprob) != self.n_partial_states:
        raise ValueError('startprob must have length n_partial_states')
    if not np.allclose(np.sum(startprob, axis=1), 1.0):
        raise ValueError('startprob must sum to 1.0')

    self._log_startprob = np.log(np.asarray(startprob).copy())

startprob = property(_get_startprob, _set_startprob)
def _get_steadyprob(self):
    """Steady-state probabilities, shape (n_partial_states, n_hidden_states)."""
    return np.exp(self._log_steadyprob)

def _set_steadyprob(self, steadyprob):
    """Validate steadyprob and store it in log space.

    None initializes a uniform distribution over hidden states.
    Raises ValueError on wrong length or rows that do not sum to 1.
    """
    if steadyprob is None:
        steadyprob = np.ones(shape=(self.n_partial_states, self.n_hidden_states)) / self.n_hidden_states
    else:
        # np.float was a deprecated alias removed from numpy; use the builtin
        steadyprob = np.asarray(steadyprob, dtype=float)

    # check if there exists a component whose value is exactly zero
    # if so, add a small number and re-normalize
    # (np.alltrue was removed from numpy; np.all is the supported spelling)
    if not np.all(steadyprob):
        steadyprob = normalize(steadyprob, axis=1)

    if len(steadyprob) != self.n_partial_states:
        raise ValueError('steadyprob must have length n_partial_states')
    if not np.allclose(np.sum(steadyprob, axis=1), 1.0):
        raise ValueError('steadyprob must sum to 1.0')

    self._log_steadyprob = np.log(np.asarray(steadyprob).copy())

steadyprob = property(_get_steadyprob, _set_steadyprob)
def _get_transmat(self):
    """Transition tensor, shape (P, P, H, H): (pstate_from, pstate_to, hstate_from, hstate_to)."""
    return np.exp(self._log_transmat)

def _set_transmat(self, transmat):
    """Validate the transition tensor and store it in log space.

    None initializes uniform hidden-state transitions. Raises ValueError on
    a wrong shape or rows (last axis) that do not sum to 1.
    """
    if transmat is None:
        transmat = np.ones(shape=(self.n_partial_states, self.n_partial_states, self.n_hidden_states,
                                  self.n_hidden_states)) / self.n_hidden_states

    # check if there exists a component whose value is exactly zero
    # if so, add a small number and re-normalize
    # (np.alltrue was removed from numpy; np.all is the supported spelling)
    if not np.all(transmat):
        transmat = normalize(transmat, axis=3)

    if (np.asarray(transmat).shape
            != (self.n_partial_states, self.n_partial_states, self.n_hidden_states, self.n_hidden_states)):
        raise ValueError('transmat must have shape '
                         '(n_partial_states,n_partial_states,n_hidden_states,n_hidden_states)')
    if not np.all(np.allclose(np.sum(transmat, axis=3), 1.0)):
        raise ValueError('Rows of transmat must sum to 1.0')

    self._log_transmat = np.log(np.asarray(transmat).copy())
    # Replace any nan produced by the log with NEGINF
    underflow_idx = np.isnan(self._log_transmat)
    self._log_transmat[underflow_idx] = NEGINF

transmat = property(_get_transmat, _set_transmat)
def _compute_log_likelihood(self, obs, pstates_idx):
    """Per-event emission log-likelihoods.

    Returns an array of shape (n_events, n_hidden_states): the log density
    of each observation row under every hidden state, conditioned on that
    row's partial state index. Densities are floored at MIN_PROBA before
    taking the log, and per-feature log-likelihoods are summed (features
    treated as conditionally independent).
    """
    q = np.zeros(shape=(len(obs), self.n_hidden_states, self.n_features))

    for col, (feature_name, feature_distr) in enumerate(zip(self.emission_name, self.emission_distr)):
        if feature_distr == 'normal':
            mu = self.emission[feature_name]['mu'][pstates_idx]
            sigma = self.emission[feature_name]['sigma'][pstates_idx]
            for j in range(self.n_hidden_states):
                q[:, j, col] = np.log(
                    np.maximum(MIN_PROBA, stats.norm.pdf(obs[:, col], loc=mu[:, j], scale=sigma[:, j])))
        if feature_distr == 'lognormal':
            logmu = self.emission[feature_name]['logmu'][pstates_idx]
            logsigma = self.emission[feature_name]['logsigma'][pstates_idx]
            for j in range(self.n_hidden_states):
                q[:, j, col] = np.log(np.maximum(MIN_PROBA,
                                                 stats.lognorm.pdf(obs[:, col], logsigma[:, j], loc=0,
                                                                   scale=np.exp(logmu[:, j]))))

    # Sum the per-feature log-likelihoods
    q = q.sum(axis=2)
    return q
def _generate_sample_from_state(self, hidden_state, pstates_idx, random_state=None):
sample = np.zeros(self.n_features)
for col, (feature_name, feature_distr) in enumerate(zip(self.emission_name, self.emission_distr)):
if feature_distr == 'normal':
mu = self.emission[feature_name]['mu'][pstates_idx][hidden_state]
sigma = self.emission[feature_name]['sigma'][pstates_idx][hidden_state]
sample[col] = stats.norm.rvs(loc=mu, scale=sigma, random_state=random_state)
if feature_distr == 'lognormal':
logmu = self.emission[feature_name]['logmu'][pstates_idx][hidden_state]
logsigma = self.emission[feature_name]['logsigma'][pstates_idx][hidden_state]
sample[col] = stats.lognorm.rvs(logsigma, loc=0, scale=np.exp(logmu), random_state=random_state)
return sample
def _init_pstates(self, unique_pstates):
    """Assign each partial-state label a 1-based index.

    Index 0 is reserved for the unknown/marginal state; looking up an
    unseen label in self.e yields 0 via the defaultdict.
    """
    ordered = np.sort(unique_pstates)
    self.e = defaultdict(int)
    self.e.update({label: rank for rank, label in enumerate(ordered, start=1)})
    self.er = {v: k for k, v in self.e.items()}  # Reverse lookup
    self.er[0] = UNKNOWN_PSTATE
    self.n_partial_states = len(self.e.keys()) + 1  # Add one for the unknown state
    return
def _init_pstate_freqs(self, pstates_idx):
    """Count pstate and pstate-transition frequencies and derive the
    empirical start/transition/steady-state probabilities.

    Index 0 is the meta (marginal) state; its counts aggregate all others.
    Requires pstate_startprob / pstate_transmat / pstate_steadyprob to be
    pre-allocated (done by _init_from_obs or _init_random).
    """
    # Partial state frequencies
    self.pstate_freq = Counter([idx for seq in pstates_idx for idx in seq])
    self.pstate_trans_freq = Counter([(idx1, idx2) for seq in pstates_idx for idx1, idx2 in zip(seq[:-1], seq[1:])])
    # Store freqs for the meta state
    self.pstate_freq[0] = len(np.concatenate(pstates_idx))
    for seq in pstates_idx:
        self.pstate_startprob[seq[0]] += 1
        self.pstate_steadyprob += np.bincount(seq, minlength=self.n_partial_states)
        for idx1, idx2 in zip(seq[:-1], seq[1:]):
            # Meta-state counts: any->any, any->0, 0->any
            self.pstate_trans_freq[(0, 0)] += 1
            self.pstate_trans_freq[(idx1, 0)] += 1
            self.pstate_trans_freq[(0, idx2)] += 1
            self.pstate_transmat[idx1, idx2] += 1
    # TODO: separate probas from freqs
    # Normalize to get the probabilities, ignore the meta state at idx 0
    self.pstate_startprob[1:] = normalize(self.pstate_startprob[1:])
    self.pstate_transmat[1:, 1:] = normalize(self.pstate_transmat[1:, 1:], axis=1)
    self.pstate_steadyprob[1:] = normalize(self.pstate_steadyprob[1:])
    return
def _init_from_obs(self, obs, pstates_idx):
    """Initialize model parameters from the observations.

    Start/transition probabilities are set uniform. Emission parameters come
    from per-pstate sample statistics; for the first feature the per-hidden-
    state means are spread around the sample mean by +/- init_spread std
    devs, so hidden states start ordered by that feature.
    """
    # Partial state probabilities
    self.pstate_startprob = np.zeros(self.n_partial_states)
    self.pstate_transmat = np.zeros((self.n_partial_states, self.n_partial_states))
    self.pstate_steadyprob = np.zeros(self.n_partial_states)

    # obs should be (N*T, n_features)
    # N is the number of samples
    # T is the size of each sample
    obs = np.concatenate(obs)
    pstates_idx = np.concatenate(pstates_idx)

    # Initialize starting and transition probas
    self.startprob = np.ones(shape=(self.n_partial_states, self.n_hidden_states)) / self.n_hidden_states
    self.steadyprob = np.ones(shape=(self.n_partial_states, self.n_hidden_states)) / self.n_hidden_states
    self.transmat = np.ones(shape=(self.n_partial_states, self.n_partial_states, self.n_hidden_states,
                                   self.n_hidden_states)) / self.n_hidden_states

    # Initialize emission parameters
    # Hidden states are ordered by the first feature
    feature1 = self.emission_name[0]
    for col, (feature_name, feature_distr) in enumerate(zip(self.emission_name, self.emission_distr)):
        if feature_distr == 'normal':
            self.emission[feature_name]['mu'] = np.zeros(shape=(self.n_partial_states, self.n_hidden_states))
            self.emission[feature_name]['sigma'] = np.zeros(shape=(self.n_partial_states, self.n_hidden_states))
            for idx in range(1, self.n_partial_states):
                idx_pstate = (pstates_idx == idx)
                # Fall back to all observations if this pstate never occurs
                if not np.any(idx_pstate):
                    idx_pstate = np.arange(len(pstates_idx))
                if feature_name == feature1:
                    self.emission[feature_name]['sigma'][idx, :] = np.maximum(obs[idx_pstate, col].std(), MIN_PROBA)
                    self.emission[feature_name]['mu'][idx] = obs[idx_pstate, col].mean() + \
                                                             obs[:, col].std() * np.linspace(
                                                                 -self.init_spread, self.init_spread,
                                                                 self.n_hidden_states)
                else:
                    self.emission[feature_name]['sigma'][idx, :] = np.maximum(obs[idx_pstate, col].std(), MIN_PROBA)
                    self.emission[feature_name]['mu'][idx, :] = obs[idx_pstate, col].mean()
        if feature_distr == 'lognormal':
            self.emission[feature_name]['logmu'] = np.zeros(shape=(self.n_partial_states, self.n_hidden_states))
            self.emission[feature_name]['logsigma'] = np.zeros(shape=(self.n_partial_states, self.n_hidden_states))
            for idx in range(1, self.n_partial_states):
                idx_pstate = (pstates_idx == idx)
                # Fall back to all observations if this pstate never occurs
                if not np.any(idx_pstate):
                    idx_pstate = np.arange(len(pstates_idx))
                if feature_name == feature1:
                    self.emission[feature_name]['logsigma'][idx, :] = np.maximum(np.log(obs[idx_pstate, col]).std(),
                                                                                 MIN_PROBA)
                    self.emission[feature_name]['logmu'][idx] = np.log(obs[idx_pstate, col]).mean() + np.log(
                        obs[:, col]).std() * np.linspace(-self.init_spread, self.init_spread,
                                                         self.n_hidden_states)
                else:
                    self.emission[feature_name]['logsigma'][idx, :] = np.maximum(np.log(obs[idx_pstate, col]).std(),
                                                                                 MIN_PROBA)
                    self.emission[feature_name]['logmu'][idx] = np.log(obs[idx_pstate, col]).mean()
    return
def _init_random(self, random_state=None):
    """Randomly initialize pstate and hidden-state parameters.

    Falls back to self.random_state when random_state is None. Hidden
    states are ordered by sorting the first feature's location parameter.
    """
    if random_state is None:
        random_state = self.random_state
    random_state = check_random_state(random_state)

    self.pstate_startprob = np.zeros(self.n_partial_states)
    self.pstate_transmat = np.zeros((self.n_partial_states, self.n_partial_states))
    self.pstate_steadyprob = np.zeros(self.n_partial_states)

    # Random stochastic vectors/matrices for the observed (non-meta) pstates
    self.pstate_startprob[1:] = gen_stochastic_matrix(size=self.n_partial_states - 1, random_state=random_state)
    self.pstate_transmat[1:, 1:] = gen_stochastic_matrix(
        size=(self.n_partial_states - 1, self.n_partial_states - 1), random_state=random_state)
    self.pstate_steadyprob[1:] = steadystate(self.pstate_transmat[1:, 1:])

    self.startprob = gen_stochastic_matrix(size=(self.n_partial_states, self.n_hidden_states),
                                           random_state=random_state)
    transmat = np.zeros((self.n_partial_states, self.n_partial_states, self.n_hidden_states, self.n_hidden_states))
    for i, j in product(range(1, self.n_partial_states), range(1, self.n_partial_states)):
        transmat[i, j] = gen_stochastic_matrix(size=(self.n_hidden_states, self.n_hidden_states),
                                               random_state=random_state)
    self.transmat = normalize(transmat, axis=3)

    # Initialize emission parameters from the per-distribution ranges
    for feature_name, feature_distr in zip(self.emission_name, self.emission_distr):
        if feature_distr == 'normal':
            self.emission[feature_name]['mu'] = random_state.uniform(*_RANDINIT[feature_distr]['mu'], size=(
                self.n_partial_states, self.n_hidden_states))
            self.emission[feature_name]['sigma'] = random_state.uniform(*_RANDINIT[feature_distr]['sigma'], size=(
                self.n_partial_states, self.n_hidden_states))
        if feature_distr == 'lognormal':
            self.emission[feature_name]['logmu'] = random_state.uniform(*_RANDINIT[feature_distr]['logmu'], size=(
                self.n_partial_states, self.n_hidden_states))
            self.emission[feature_name]['logsigma'] = random_state.uniform(*_RANDINIT[feature_distr]['logsigma'],
                                                                           size=(self.n_partial_states,
                                                                                 self.n_hidden_states))

    # Keep hidden states ordered by the first feature's location parameter
    if self.emission_distr[0] == 'normal':
        self.emission[self.emission_name[0]]['mu'] = np.sort(self.emission[self.emission_name[0]]['mu'], axis=1)
    elif self.emission_distr[0] == 'lognormal':
        self.emission[self.emission_name[0]]['logmu'] = np.sort(self.emission[self.emission_name[0]]['logmu'],
                                                                axis=1)
    return
def _smooth(self):
    """Mix each pstate's parameters with the meta (marginal) state's.

    The mixing weight for each parameter group follows the configured
    smoothing strategy: 'freq' (inverse count), 'proba' (steady-state
    probability), 'exp' (exponential decay in count), 'fixed' (marginal
    only), or None (no smoothing).
    """
    self._compute_marginals()

    # Smooth starting probabilities toward the marginal state's (index 0)
    startprob = self.startprob
    for j in range(1, self.n_partial_states):
        if 'freq' == self.smoothing['startprob']:
            w_ = 1 / (1 + self.pstate_freq[j])
            w_j = 1 - w_
        elif 'proba' == self.smoothing['startprob']:
            w_j = self.pstate_steadyprob[j]
            w_ = 1 - w_j
        elif 'exp' == self.smoothing['startprob']:
            w_ = np.exp(-self.pstate_freq[j])
            w_j = 1 - w_
        elif 'fixed' == self.smoothing['startprob']:
            w_ = 1
            w_j = 0
        elif self.smoothing['startprob'] is None:
            w_ = 0
            w_j = 1
        else:
            raise Exception('Wrong smoothing for startprob: ' + self.smoothing['startprob'])
        startprob[j] = w_j * self.startprob[j] + w_ * self.startprob[0]
    self.startprob = startprob

    # Smooth transition blocks toward the row (i,0), column (0,j) and
    # global (0,0) marginal blocks
    transmat = self.transmat
    for i, j in product(range(1, self.n_partial_states), range(1, self.n_partial_states)):
        if 'freq' == self.smoothing['transmat']:
            w_i0 = 1 / (1 + self.pstate_trans_freq[i, j] + self.pstate_trans_freq[0, j])
            w_0j = 1 / (1 + self.pstate_trans_freq[i, j] + self.pstate_trans_freq[i, 0])
            w_ij = 1 - (w_i0 + w_0j)
            w_ = 0
        elif 'proba' == self.smoothing['transmat']:
            denom = self.pstate_transmat[i, j] + self.pstate_transmat[i, :].sum() + self.pstate_transmat[:, j].sum()
            w_i0 = self.pstate_transmat[i, :].sum() / denom
            w_0j = self.pstate_transmat[:, j].sum() / denom
            w_ij = self.pstate_transmat[i, j] / denom
            w_ = 0
        elif 'exp' == self.smoothing['transmat']:
            w_i0 = self.pstate_trans_freq[i, 0] * np.exp(-self.pstate_trans_freq[i, j])
            w_0j = self.pstate_trans_freq[0, j] * np.exp(-self.pstate_trans_freq[i, j])
            w_ = self.pstate_trans_freq[0, 0] * np.exp(
                -(self.pstate_trans_freq[i, 0] + self.pstate_trans_freq[0, j]))
            w_ij = self.pstate_trans_freq[i, j]
            w_ij, w_i0, w_0j, w_ = normalize(np.array([w_ij, w_i0, w_0j, w_]))
        elif 'fixed' == self.smoothing['transmat']:
            w_i0 = 0
            w_0j = 0
            w_ = 1
            w_ij = 0
        elif self.smoothing['transmat'] is None:
            w_i0 = 0
            w_0j = 0
            w_ = 0
            w_ij = 1
        else:
            raise Exception('Wrong smoothing for transmat: ' + self.smoothing['transmat'])
        # Weights must form a convex combination
        assert (w_i0 + w_0j + w_ + w_ij) - 1 < TOLERANCE
        transmat[i, j] = w_ij * self.transmat[i, j] + w_i0 * self.transmat[i, 0] + w_0j * self.transmat[0, j] + w_ * \
                         self.transmat[0, 0]
    self.transmat = transmat

    # Sanity checks: every probability row should still sum to ~1
    assert np.all(self.startprob.sum(axis=1) - 1 < TOLERANCE)
    assert np.all(self.steadyprob.sum(axis=1) - 1 < TOLERANCE)
    assert np.all(self.transmat.sum(axis=3) - 1 < TOLERANCE)

    # Smooth each emission parameter toward the marginal state's value
    for feature_name, feature_distr in zip(self.emission_name, self.emission_distr):
        for param in _DISTRIBUTIONS[feature_distr]:
            key = '%s:%s' % (feature_name, param)
            for j in range(1, self.n_partial_states):
                if 'freq' == self.smoothing[key]:
                    w_ = 1 / (1 + self.pstate_freq[j])
                    w_j = 1 - w_
                elif 'proba' == self.smoothing[key]:
                    w_j = self.pstate_steadyprob[j]
                    w_ = 1 - w_j
                elif 'exp' == self.smoothing[key]:
                    w_ = np.exp(-self.pstate_freq[j])
                    w_j = 1 - w_
                elif 'fixed' == self.smoothing[key]:
                    w_ = 1
                    w_j = 0
                elif self.smoothing[key] is None:
                    w_ = 0
                    w_j = 1
                else:
                    raise Exception('Wrong smoothing for ' + key)
                self.emission[feature_name][param][j] = w_j * self.emission[feature_name][param][j] + w_ * \
                                                        self.emission[feature_name][param][0]
    return
def _initialize_sufficient_statistics(self):
stats = {
'nobs': 0,
'post': np.zeros((self.n_partial_states, self.n_hidden_states)),
'obs': np.zeros((self.n_partial_states, self.n_hidden_states, self.n_features)),
'obs**2': np.zeros((self.n_partial_states, self.n_hidden_states, self.n_features)),
'lnobs': np.zeros((self.n_partial_states, self.n_hidden_states, self.n_features)),
'lnobs**2': np.zeros((self.n_partial_states, self.n_hidden_states, self.n_features)),
'start': np.zeros((self.n_partial_states, self.n_hidden_states)),
'steady': np.zeros((self.n_partial_states, self.n_hidden_states)),
'trans': np.zeros(
(self.n_partial_states, self.n_partial_states, self.n_hidden_states, self.n_hidden_states))
}
return stats
def _accumulate_sufficient_statistics(self, stats, obs, pstates_idx, framelogprob,
                                      posteriors, fwdlattice, bwdlattice):
    """Accumulate E-step sufficient statistics for one observation sequence.

    Updates `stats` in place: start-state posteriors, expected transition
    counts (via the lneta lattice), and posterior-weighted observation
    moments per partial state (raw and log scale).
    """
    stats['nobs'] += 1
    n_observations, n_hidden_states = framelogprob.shape

    # Meta state (idx 0) gets the first posterior; each pstate gets the
    # posterior at its first occurrence in the sequence
    stats['start'][0] += posteriors[0]
    for i in range(self.n_partial_states):
        if len(np.where(pstates_idx == i)[0]) > 0:
            stats['start'][i] += posteriors[np.where(pstates_idx == i)[0].min()]

    if n_observations > 1:
        # Pairwise transition log posteriors for consecutive events
        lneta = np.zeros((n_observations - 1, n_hidden_states, n_hidden_states))
        _hmmc._compute_lneta(n_observations, n_hidden_states, pstates_idx, fwdlattice,
                             self._log_transmat, bwdlattice, framelogprob,
                             lneta)
        # Accumulate expected counts per observed (pstate_i -> pstate_j) pair
        for i, j in unique_rows(np.c_[pstates_idx[:-1], pstates_idx[1:]]):
            if ((pstates_idx[:-1] == i) & (pstates_idx[1:] == j)).sum() > 0:
                stats['trans'][i, j] += np.exp(
                    logsumexp(lneta[(pstates_idx[:-1] == i) & (pstates_idx[1:] == j)], axis=0))

    # Posterior-weighted moments per pstate, on raw and log scales
    for i in range(self.n_partial_states):
        stats['post'][i] += posteriors[pstates_idx == i].sum(axis=0)
        stats['obs'][i] += np.dot(posteriors[pstates_idx == i].T, obs[pstates_idx == i])
        stats['obs**2'][i] += np.dot(posteriors[pstates_idx == i].T, obs[pstates_idx == i] ** 2)
        stats['lnobs'][i] += np.dot(posteriors[pstates_idx == i].T, np.log(obs[pstates_idx == i]))
        stats['lnobs**2'][i] += np.dot(posteriors[pstates_idx == i].T, np.log(obs[pstates_idx == i]) ** 2)
    return
def _compute_marginals(self):
    """Refresh the meta state (index 0).

    Recomputes its start/steady/transition probabilities and emission
    parameters as pstate-weighted marginals over the observed pstates.
    """
    # TODO: cythonize some of this
    # Start prob, weighted by p-state start probs
    self.startprob[0] = (self.pstate_startprob[1:, np.newaxis] * self.startprob[1:]).sum(axis=0)

    # Use the p-state transmat and transmat to get the full transmat
    full_transmat = ph2full(self.pstate_transmat[1:, 1:], self.transmat[1:, 1:])
    full_steadyprob = steadystate(full_transmat)

    # Steady state probas are determined by the full trans mat, need to be updated
    steadyprob = np.zeros(shape=(self.n_partial_states, self.n_hidden_states))
    steadyprob[0] = full_steadyprob.reshape(-1, self.n_hidden_states).sum(axis=0)
    for i in range(self.n_partial_states - 1):
        steadyprob[i + 1] = normalize(
            full_steadyprob[i * self.n_hidden_states:i * self.n_hidden_states + self.n_hidden_states])
    self.steadyprob = steadyprob

    # Update the transations to/from the marginal state
    transmat = self.transmat
    # Group the hidden states within each partial state
    for hidx1, hidx2 in product(range(self.n_hidden_states), range(self.n_hidden_states)):
        transmat[0, 0][hidx1, hidx2] = full_transmat[hidx1::self.n_hidden_states, hidx2::self.n_hidden_states].sum()
        for pidx in range(self.n_partial_states - 1):
            transmat[pidx + 1, 0][hidx1, hidx2] = full_transmat[pidx * self.n_hidden_states + hidx1,
                                                                hidx2::self.n_hidden_states].sum()
            transmat[0, pidx + 1][hidx1, hidx2] = full_transmat[hidx1::self.n_hidden_states,
                                                                pidx * self.n_hidden_states + hidx2].sum()
    self.transmat = normalize(transmat, axis=3)

    # Per-pstate weights for mixing the emission parameters
    pweights = self.pstate_steadyprob[1:, np.newaxis]

    # Update emission parameters
    for feature_name, feature_distr in zip(self.emission_name, self.emission_distr):
        if feature_distr == 'normal':
            # Marginal state is a mixture of normals
            mu = self.emission[feature_name]['mu'][1:, :]
            sigma = self.emission[feature_name]['sigma'][1:, :]
            # Weighted mean and var
            mu_0 = (pweights * mu).sum(axis=0)
            self.emission[feature_name]['mu'][0, :] = mu_0
            self.emission[feature_name]['sigma'][0, :] = np.sqrt(
                (pweights * ((mu - mu_0) ** 2 + sigma ** 2)).sum(axis=0))
        if feature_distr == 'lognormal':
            # Marginal state is a mixture of normals
            mu = self.emission[feature_name]['logmu'][1:, :]
            sigma = self.emission[feature_name]['logsigma'][1:, :]
            # Weighted mean and var
            mu_0 = (pweights * mu).sum(axis=0)
            self.emission[feature_name]['logmu'][0, :] = mu_0
            self.emission[feature_name]['logsigma'][0, :] = np.sqrt(
                (pweights * ((mu - mu_0) ** 2 + sigma ** 2)).sum(axis=0))
    return
def _do_mstep(self, stats):
    """M-step: re-estimate parameters from accumulated sufficient statistics.

    Expected counts are floored at MIN_PROBA before normalizing, and
    estimated standard deviations are floored at MIN_PROBA as well.
    """
    self.startprob = normalize(np.maximum(stats['start'], MIN_PROBA), axis=1)
    self.transmat = normalize(np.maximum(stats['trans'], MIN_PROBA), axis=3)

    for col, (feature_name, feature_distr) in enumerate(zip(self.emission_name, self.emission_distr)):
        if feature_distr == 'normal':
            denom = np.maximum(stats['post'], MIN_PROBA)
            mu = stats['obs'][:, :, col] / denom
            # Weighted variance numerator: E[x^2] - 2*mu*E[x] + mu^2
            cv_num = (stats['obs**2'][:, :, col]
                      - 2 * mu * stats['obs'][:, :, col]
                      + mu ** 2 * denom)
            sigma = np.sqrt(cv_num / denom)
            sigma[np.isnan(sigma)] = MIN_PROBA
            self.emission[feature_name]['mu'] = mu
            self.emission[feature_name]['sigma'] = np.maximum(sigma, MIN_PROBA)
        if feature_distr == 'lognormal':
            # Same update on the log-scale moments
            denom = np.maximum(stats['post'], MIN_PROBA)
            mu = stats['lnobs'][:, :, col] / denom
            cv_num = (stats['lnobs**2'][:, :, col]
                      - 2 * mu * stats['lnobs'][:, :, col]
                      + mu ** 2 * denom)
            sigma = np.sqrt(cv_num / denom)
            sigma[np.isnan(sigma)] = MIN_PROBA
            self.emission[feature_name]['logmu'] = mu
            self.emission[feature_name]['logsigma'] = np.maximum(sigma, MIN_PROBA)
    return
def _do_forward_pass(self, framelogprob, event_idx):
    """Run the forward algorithm; return (sequence logprob, forward lattice)."""
    n_obs, n_hidden = framelogprob.shape
    fwdlattice = np.zeros((n_obs, n_hidden))
    _hmmc._forward(n_obs, n_hidden, event_idx, self._log_startprob,
                   self._log_transmat, framelogprob, fwdlattice)
    # Total sequence log-likelihood is the logsumexp of the last row
    return logsumexp(fwdlattice[-1]), fwdlattice
def _do_backward_pass(self, framelogprob, event_idx):
    """Run the backward algorithm; return the backward lattice."""
    n_obs, n_hidden = framelogprob.shape
    bwdlattice = np.zeros((n_obs, n_hidden))
    _hmmc._backward(n_obs, n_hidden, event_idx, self._log_startprob,
                    self._log_transmat, framelogprob, bwdlattice)
    return bwdlattice
def _do_viterbi_pass(self, framelogprob, event_idx):
    """Viterbi decode; return (path logprob, most likely hidden-state sequence)."""
    n_obs, n_states = framelogprob.shape
    state_sequence, logprob = _hmmc._viterbi(n_obs, n_states, event_idx,
                                             self._log_startprob,
                                             self._log_transmat, framelogprob)
    return logprob, state_sequence
def rand(self, unique_pstates, random_state=None):
    """
    Randomize the POHMM parameters

    Builds the pstate index for `unique_pstates`, draws random parameters,
    and recomputes the marginal (meta) state. Returns self for chaining.
    """
    self._init_pstates(unique_pstates)
    self._init_random(random_state=random_state)
    self._compute_marginals()
    return self
def fit(self, obs, pstates, unique_pstates=None):
    """
    Estimate model parameters by EM.

    obs : list of 2d observation arrays, one per sequence
    pstates : list of 1d partial-state label sequences, parallel to obs
    unique_pstates : optional explicit label universe; defaults to the
        labels observed in pstates

    Sets logprob_, n_iter_performed_ and logprob_delta_; returns self.
    """
    obs = [np.array(o) for o in obs]
    pstates = [np.array(p) for p in pstates]

    # List or array of observation sequences
    assert len(obs) == len(pstates)
    assert obs[0].ndim == 2
    assert pstates[0].ndim == 1

    if unique_pstates is not None:
        self._init_pstates(unique_pstates)
    else:
        self._init_pstates(list(set(np.concatenate(pstates))))

    # Map the partial states to a unique index
    pstates_idx = [np.array([self.e[p] for p in seq]) for seq in pstates]

    if self.init_method == 'rand':
        self._init_random()
    elif self.init_method == 'obs':
        self._init_from_obs(obs, pstates_idx)

    self._init_pstate_freqs(pstates_idx)
    self._smooth()

    logprob = []
    for i in range(self.max_iter):
        # Expectation step
        stats = self._initialize_sufficient_statistics()
        curr_logprob = 0
        for obs_i, pstates_idx_i in zip(obs, pstates_idx):
            framelogprob = self._compute_log_likelihood(obs_i, pstates_idx_i)
            lpr, fwdlattice = self._do_forward_pass(framelogprob, pstates_idx_i)
            bwdlattice = self._do_backward_pass(framelogprob, pstates_idx_i)
            gamma = fwdlattice + bwdlattice
            posteriors = np.exp(gamma.T - logsumexp(gamma, axis=1)).T
            curr_logprob += lpr
            self._accumulate_sufficient_statistics(stats, obs_i,
                                                   pstates_idx_i, framelogprob, posteriors, fwdlattice, bwdlattice)
        logprob.append(curr_logprob)
        self.logprob_ = curr_logprob

        # Check for convergence.
        self.n_iter_performed_ = i
        if i > 0:
            delta = logprob[-1] - logprob[-2]
            # Populate the trailing-underscore attribute declared in
            # __init__ (previously only self.logprob_delta was set, leaving
            # logprob_delta_ forever None); keep the old name for
            # backward compatibility.
            self.logprob_delta_ = delta
            self.logprob_delta = delta
            if delta < self.thresh:
                break

        # Maximization step
        self._do_mstep(stats)
        # Mix the parameters
        self._smooth()
    return self
def score_events(self, obs, pstates):
"""
Compute the log probability of each event under the model.
"""
pstates_idx = np.array([self.e[p] for p in pstates])
framelogprob = self._compute_log_likelihood(obs, pstates_idx)
_, fwdlattice = self._do_forward_pass(framelogprob, pstates_idx)
L = logsumexp(fwdlattice, axis=1)
return np.concatenate([L[[0]], np.diff(L)])
def predict_states(self, obs, pstates):
pstates_idx = np.array([self.e[p] for p in pstates])
framelogprob = self._compute_log_likelihood(obs, pstates_idx)
viterbi_logprob, state_sequence = self._do_viterbi_pass(framelogprob, pstates_idx)
return viterbi_logprob, state_sequence
def predict(self, obs, pstates, next_pstate=None):
"""
Predict the next observation
"""
assert len(obs) == len(pstates)
pstates_idx = np.array([self.e[ei] for ei in pstates])
next_pstate_idx = self.e[next_pstate]
if len(obs) == 0:
# No history, use the starting probas
next_hstate_prob = self.startprob[next_pstate_idx]
else:
# With a history, determine the hidden state posteriors using
# the last posteriors and transition matrix
framelogprob = self._compute_log_likelihood(obs, pstates_idx)
_, fwdlattice = self._do_forward_pass(framelogprob, pstates_idx)
next_hstate_prob = np.zeros(self.n_hidden_states)
alpha_n = fwdlattice[-1]
vmax = alpha_n.max(axis=0)
alpha_n = np.exp(alpha_n - vmax)
alpha_n = alpha_n / alpha_n.sum()
trans = self.transmat[pstates_idx[-1], next_pstate_idx]
for i in range(self.n_hidden_states):
next_hstate_prob[i] = np.sum([alpha_n[j] * trans[j, i] for j in range(self.n_hidden_states)])
assert next_hstate_prob.sum() - 1 < TOLERANCE
# Make the prediction
prediction = np.array(
[self.expected_value(feature, pstate=next_pstate, hstate_prob=next_hstate_prob) for feature in
self.emission_name])
# next_hstate = np.argmax(next_hstate_prob)
# prediction = np.array(
# [self.expected_value(feature, pstate=next_pstate, hstate=next_hstate) for feature in
# self.emission_name])
return prediction
def gen_pstates_idx(self, n, random_state=None):
if random_state is None:
random_state = self.random_state
random_state = check_random_state(random_state)
startprob_cdf = np.cumsum(self.pstate_startprob)
transmat_cdf = np.cumsum(self.pstate_transmat, 1)
# Initial state.
rand = random_state.rand()
curr_pstate = (startprob_cdf > rand).argmax()
pstates = [curr_pstate]
for _ in range(1, n):
rand = random_state.rand()
curr_pstate = (transmat_cdf[curr_pstate] > rand).argmax()
pstates.append(curr_pstate)
return np.array(pstates, dtype=int)
    def sample(self, pstates=None, n_obs=None, random_state=None):
        """
        Generate samples from the model.

        Provide either ``pstates`` (a sequence of partial-state labels to
        condition on) or ``n_obs`` (number of events; the partial-state
        sequence is then generated from the fitted partial-state chain),
        but not both.

        Returns
        -------
        (obs, pstates, hidden_states) : tuple of ndarrays
        """
        random_state = check_random_state(random_state)

        if pstates is None and n_obs is None:
            raise Exception('Must provide either pstates or n_obs')

        if pstates is not None and n_obs is not None:
            raise Exception('Must provide either pstates or n_obs but not both')

        gen_pstates = False
        # NOTE: this draw is consumed only when generating pstates; when
        # pstates are given it is a discarded draw (kept for RNG-sequence
        # compatibility with previous versions).
        rand = random_state.rand()
        if pstates is None:
            gen_pstates = True
            pstartprob_cdf = np.cumsum(self.pstate_startprob)
            ptransmat_cdf = np.cumsum(self.pstate_transmat, 1)
            # Initial pstate
            currpstate = (pstartprob_cdf > rand).argmax()
            pstates_idx = [currpstate]
            pstates = [self.er[currpstate]]
        else:
            n_obs = len(pstates)
            pstates_idx = np.array([self.e[p] for p in pstates])

        startprob_pdf = self.startprob[pstates_idx[0]]
        startprob_cdf = np.cumsum(startprob_pdf)
        # NOTE(review): this transmat_cdf is overwritten inside the loop
        # before first use; it appears to be dead — confirm.
        transmat_cdf = np.cumsum(self.transmat[0, pstates_idx[0]], 1)

        # Initial hidden state
        rand = random_state.rand()
        currstate = (startprob_cdf > rand).argmax()
        hidden_states = [currstate]
        obs = [self._generate_sample_from_state(currstate, pstates_idx[0], random_state)]

        for i in range(1, n_obs):
            rand = random_state.rand()
            if gen_pstates:
                # Advance the partial-state chain with this draw
                currpstate = (ptransmat_cdf[currpstate] > rand).argmax()
                pstates_idx.append(currpstate)
                pstates.append(self.er[currpstate])

            # Hidden-state transition conditioned on (previous, current) pstate
            transmat_cdf = np.cumsum(self.transmat[pstates_idx[i - 1], pstates_idx[i]], 1)
            rand = random_state.rand()
            currstate = (transmat_cdf[currstate] > rand).argmax()
            hidden_states.append(currstate)
            obs.append(self._generate_sample_from_state(currstate, pstates_idx[i], random_state))

        return np.array(obs), np.array(pstates), np.array(hidden_states, dtype=int)
def fit_df(self, dfs, pstate_col=PSTATE_COL):
"""
Convenience function to fit a model from a list of dataframes
"""
obs_cols = list(self.emission_name)
obs = [df[df.columns.difference([pstate_col])][obs_cols].values for df in dfs]
pstates = [df[pstate_col].values for df in dfs]
return self.fit(obs, pstates)
def score_df(self, df, pstate_col=PSTATE_COL):
"""
"""
obs_cols = list(self.emission_name)
obs = df[df.columns.difference([pstate_col])][obs_cols].values
pstates = df[pstate_col].values
return self.score(obs, pstates)
def score_events_df(self, df, pstate_col=PSTATE_COL, score_col='score'):
"""
"""
df = df.copy()
obs_cols = list(self.emission_name)
obs = df[df.columns.difference([pstate_col])][obs_cols].values
pstates = df[pstate_col].values
df[score_col] = self.score_events(obs, pstates)
return df
def predict_states_df(self, df, pstate_col=PSTATE_COL, hstate_col=HSTATE_COL):
df = df.copy()
obs_cols = list(self.emission_name)
obs = df[df.columns.difference([pstate_col])][obs_cols].values
pstates = df[pstate_col].values
_, df[hstate_col] = self.predict_states(obs, pstates)
return df
def predict_df(self, df, next_pstate=None, pstate_col=PSTATE_COL):
obs_cols = list(self.emission_name)
obs = df[df.columns.difference([pstate_col])][obs_cols].values
pstates = df[pstate_col].values
return self.predict(obs, pstates, next_pstate)
def sample_df(self, pstates=None, n_obs=None, random_state=None, pstate_col=PSTATE_COL, hstate_col=HSTATE_COL):
"""
Convenience function to generate samples a model and create a dataframe
"""
try:
import pandas as pd
except Exception as e:
raise e
obs, pstates, hstates = self.sample(pstates, n_obs, random_state)
items = []
if pstate_col is not None:
items.append((pstate_col, pstates))
if hstate_col is not None:
items.append((hstate_col, hstates))
items = items + [(self.emission_name[i], obs[:, i]) for i in range(self.n_features)]
df = pd.DataFrame.from_items(items)
return df
    def __str__(self):
        """
        Human-readable dump of all model parameters: transition and starting
        probabilities for the marginal ('.') and each partial state, the
        steady-state probabilities, and every emission parameter.
        """
        pstates = sorted(self.e.keys())

        # Section separators
        sep = '-' * 80 + '\n'
        sep2 = '_' * 40 + '\n'
        out = 'POHMM\n'
        out += 'H-states: %d\n' % self.n_hidden_states
        out += 'P-states: (%d) %s\n' % (len(pstates), str(pstates))
        out += 'Emission: %s\n' % (self.emission_distr)
        out += sep
        out += 'Transition probas\n'
        out += sep2
        # '.' denotes the marginal (idx-0) partial state
        out += '. -> .\n%s\n' % str(self.transmat[0, 0])
        for pstate in pstates:
            out += sep2
            out += '%s -> .\n%s\n' % (pstate, str(self.transmat[self.e[pstate], 0]))
            out += sep2
            out += '. -> %s\n%s\n' % (pstate, str(self.transmat[0, self.e[pstate]]))
        for pstate1, pstate2 in product(pstates, pstates):
            out += sep2
            out += '%s -> %s\n%s\n' % (pstate1, pstate2, str(self.transmat[self.e[pstate1], self.e[pstate2]]))
        out += sep
        out += 'Starting probas\n'
        out += '.: %s\n' % str(self.startprob[0])
        for pstate in pstates:
            out += '%s: %s\n' % (pstate, str(self.startprob[self.e[pstate]]))
        out += sep
        out += 'Steady probas\n'
        out += '.: %s\n' % str(self.steadyprob[0])
        for pstate in pstates:
            out += '%s: %s\n' % (pstate, str(self.steadyprob[self.e[pstate]]))
        out += sep
        out += 'Emissions\n'
        for feature_name, feature_distr in zip(self.emission_name, self.emission_distr):
            out += sep2
            out += 'Feature %s: %s\n' % (feature_name, feature_distr)
            for param in _DISTRIBUTIONS[feature_distr]:
                out += '.: %s = %s\n' % (param, str(self.emission[feature_name][param][0]))
                for pstate in pstates:
                    out += '%s: %s = %s\n' % (pstate, param, str(self.emission[feature_name][param][self.e[pstate]]))
        out += sep
        return out
def expected_value(self, feature=None, pstate=None, hstate=None, pstate_prob=None, hstate_prob=None):
"""
Determine the joint maximum likelihood estimate
"""
# Use the first feature by default
if feature is None:
feature = self.emission_name[0]
# Will default to marginal pstate if pstate is unknown or None
pstate_idx = self.e[pstate]
if pstate is not None and pstate_prob is not None:
raise Exception('Must provide either pstate or pstate_proba but not both')
if hstate is not None and hstate_prob is not None:
raise Exception('Must provide either hstate or hstate_proba but not both')
# Marginalize pstate using the steady state probas
if pstate_prob is None:
pstate_prob = self.pstate_steadyprob
# Marginalize hstate using the steady state probas
if hstate_prob is None:
hstate_prob = self.steadyprob[pstate_idx]
if pstate is None and hstate is None:
# Marginalize both pstate and hstate
w = (pstate_prob[:, np.newaxis] * hstate_prob).flatten()
if self.emission_name_distr[feature] == 'lognormal':
return np.sum(w * expected_lognormal(self.emission[feature]['logsigma'].flatten(),
self.emission[feature]['logmu'].flatten()))
elif self.emission_name_distr[feature] == 'normal':
return np.sum(w * expected_normal(self.emission[feature]['mu'].flatten(),
self.emission[feature]['sigma'].flatten()))
elif hstate is None:
# Marginalize hstate
if self.emission_name_distr[feature] == 'lognormal':
return np.sum(hstate_prob * expected_lognormal(self.emission[feature]['logsigma'][pstate_idx, :],
self.emission[feature]['logmu'][pstate_idx, :]))
elif self.emission_name_distr[feature] == 'normal':
return np.sum(hstate_prob * expected_normal(self.emission[feature]['mu'][pstate_idx, :],
self.emission[feature]['sigma'][pstate_idx, :]))
elif pstate is None:
# Marginalize pstate
if self.emission_name_distr[feature] == 'lognormal':
return np.sum(pstate_prob * expected_lognormal(self.emission[feature]['logsigma'][:, hstate],
self.emission[feature]['logmu'][:, hstate]))
elif self.emission_name_distr[feature] == 'normal':
return np.sum(pstate_prob * expected_normal(self.emission[feature]['mu'][:, hstate],
self.emission[feature]['sigma'][:, hstate]))
else:
if self.emission_name_distr[feature] == 'lognormal':
return expected_lognormal(self.emission[feature]['logsigma'][pstate_idx, hstate],
self.emission[feature]['logmu'][pstate_idx, hstate])
elif self.emission_name_distr[feature] == 'normal':
return expected_normal(self.emission[feature]['mu'][pstate_idx, hstate],
self.emission[feature]['sigma'][pstate_idx, hstate])
return
    def pdf_fn(self, feature=None, pstate=None, hstate=None, pstate_prob=None, hstate_prob=None):
        """
        Build a probability density function for a feature.

        Returns a callable pdf(x) giving the mixture density of ``feature``,
        marginalizing over any state not specified (weights from the steady
        state probas unless explicit weights are given).
        """
        # Use the first feature by default
        if feature is None:
            feature = self.emission_name[0]

        # Will default to marginal pstate if pstate is unknown or None
        # NOTE(review): this defaultdict lookup caches unknown/None keys in
        # self.e — see the TODO in params().
        pstate_idx = self.e[pstate]

        if pstate is not None and pstate_prob is not None:
            raise Exception('Must provide either pstate or pstate_proba but not both')

        if hstate is not None and hstate_prob is not None:
            raise Exception('Must provide either hstate or hstate_proba but not both')

        # Marginalize pstate using the steady state probas
        if pstate_prob is None:
            pstate_prob = self.pstate_steadyprob

        # Marginalize hstate using the steady state probas
        if hstate_prob is None:
            hstate_prob = self.steadyprob[pstate_idx]

        if pstate is None and hstate is None:
            # Marginalize both pstate and hstate
            w = (pstate_prob[:, np.newaxis] * hstate_prob).flatten()
            if self.emission_name_distr[feature] == 'lognormal':
                pdf = lambda x: stats.lognorm.pdf(x, self.emission[feature]['logsigma'].flatten(), 0,
                                                  np.exp(self.emission[feature]['logmu'].flatten()))
            elif self.emission_name_distr[feature] == 'normal':
                pdf = lambda x: stats.norm.pdf(x, self.emission[feature]['mu'].flatten(),
                                               self.emission[feature]['sigma'].flatten())
        elif hstate is None:
            # Marginalize hstate
            w = hstate_prob
            if self.emission_name_distr[feature] == 'lognormal':
                pdf = lambda x: stats.lognorm.pdf(x, self.emission[feature]['logsigma'][pstate_idx, :], 0,
                                                  np.exp(self.emission[feature]['logmu'][pstate_idx, :]))
            elif self.emission_name_distr[feature] == 'normal':
                pdf = lambda x: stats.norm.pdf(x, self.emission[feature]['mu'][pstate_idx, :],
                                               self.emission[feature]['sigma'][pstate_idx, :])
        elif pstate is None:
            # Marginalize pstate
            w = pstate_prob
            if self.emission_name_distr[feature] == 'lognormal':
                pdf = lambda x: stats.lognorm.pdf(x, self.emission[feature]['logsigma'][:, hstate], 0,
                                                  np.exp(self.emission[feature]['logmu'][:, hstate]))
            elif self.emission_name_distr[feature] == 'normal':
                pdf = lambda x: stats.norm.pdf(x, self.emission[feature]['mu'][:, hstate],
                                               self.emission[feature]['sigma'][:, hstate])
        else:
            # Both states fixed: single component, unit weight
            w = 1
            if self.emission_name_distr[feature] == 'lognormal':
                pdf = lambda x: stats.lognorm.pdf(x, self.emission[feature]['logsigma'][pstate_idx, hstate], 0,
                                                  np.exp(self.emission[feature]['logmu'][pstate_idx, hstate]))
            elif self.emission_name_distr[feature] == 'normal':
                pdf = lambda x: stats.norm.pdf(x, self.emission[feature]['mu'][pstate_idx, hstate],
                                               self.emission[feature]['sigma'][pstate_idx, hstate])

        def fn(x):
            # Weighted mixture density; handles scalar or array-like x
            if np.isscalar(x):
                p = np.sum(w * pdf(x))
            else:
                x = np.array(x)
                p = np.zeros(len(x))
                for i, xi in enumerate(x):
                    p[i] = np.sum(w * pdf(xi))
            return p

        return fn
    def cdf_fn(self, feature=None, pstate=None, hstate=None, pstate_prob=None, hstate_prob=None):
        """
        Build a cumulative distribution function for a feature.

        Same structure as :meth:`pdf_fn`, using the distribution CDFs:
        returns a callable cdf(x) for the mixture, marginalizing over any
        state not specified.
        """
        # Use the first feature by default
        if feature is None:
            feature = self.emission_name[0]

        # Will default to marginal pstate if pstate is unknown or None
        # NOTE(review): this defaultdict lookup caches unknown/None keys in
        # self.e — see the TODO in params().
        pstate_idx = self.e[pstate]

        if pstate is not None and pstate_prob is not None:
            raise Exception('Must provide either pstate or pstate_proba but not both')

        if hstate is not None and hstate_prob is not None:
            raise Exception('Must provide either hstate or hstate_proba but not both')

        # Marginalize pstate using the steady state probas
        if pstate_prob is None:
            pstate_prob = self.pstate_steadyprob

        # Marginalize hstate using the steady state probas
        if hstate_prob is None:
            hstate_prob = self.steadyprob[pstate_idx]

        if pstate is None and hstate is None:
            # Marginalize both pstate and hstate
            w = (pstate_prob[:, np.newaxis] * hstate_prob).flatten()
            if self.emission_name_distr[feature] == 'lognormal':
                cdf = lambda x: stats.lognorm.cdf(x, self.emission[feature]['logsigma'].flatten(), 0,
                                                  np.exp(self.emission[feature]['logmu'].flatten()))
            elif self.emission_name_distr[feature] == 'normal':
                cdf = lambda x: stats.norm.cdf(x, self.emission[feature]['mu'].flatten(),
                                               self.emission[feature]['sigma'].flatten())
        elif hstate is None:
            # Marginalize hstate
            w = hstate_prob
            if self.emission_name_distr[feature] == 'lognormal':
                cdf = lambda x: stats.lognorm.cdf(x, self.emission[feature]['logsigma'][pstate_idx, :], 0,
                                                  np.exp(self.emission[feature]['logmu'][pstate_idx, :]))
            elif self.emission_name_distr[feature] == 'normal':
                cdf = lambda x: stats.norm.cdf(x, self.emission[feature]['mu'][pstate_idx, :],
                                               self.emission[feature]['sigma'][pstate_idx, :])
        elif pstate is None:
            # Marginalize pstate
            w = pstate_prob
            if self.emission_name_distr[feature] == 'lognormal':
                cdf = lambda x: stats.lognorm.cdf(x, self.emission[feature]['logsigma'][:, hstate], 0,
                                                  np.exp(self.emission[feature]['logmu'][:, hstate]))
            elif self.emission_name_distr[feature] == 'normal':
                cdf = lambda x: stats.norm.cdf(x, self.emission[feature]['mu'][:, hstate],
                                               self.emission[feature]['sigma'][:, hstate])
        else:
            # Both states fixed: single component, unit weight
            w = 1
            if self.emission_name_distr[feature] == 'lognormal':
                cdf = lambda x: stats.lognorm.cdf(x, self.emission[feature]['logsigma'][pstate_idx, hstate], 0,
                                                  np.exp(self.emission[feature]['logmu'][pstate_idx, hstate]))
            elif self.emission_name_distr[feature] == 'normal':
                cdf = lambda x: stats.norm.cdf(x, self.emission[feature]['mu'][pstate_idx, hstate],
                                               self.emission[feature]['sigma'][pstate_idx, hstate])

        def fn(x):
            # Weighted mixture CDF; handles scalar or array-like x
            if np.isscalar(x):
                p = np.sum(w * cdf(x))
            else:
                x = np.array(x)
                p = np.zeros(len(x))
                for i, xi in enumerate(x):
                    p[i] = np.sum(w * cdf(xi))
            return p

        return fn
    def params(self, pstates=None):
        """
        Flatten the model parameters into a 1d array.

        Emission parameters come first (for the marginal state plus each
        partial state, per hidden state), followed by the diagonal transition
        probabilities (diagonals only — this assumes 2 hidden states).
        """
        if pstates is None:
            pstates = [None] + sorted(
                set(self.er.values()))  # TODO: self.e caches any unknown value, maybe it shouldn't?

        params = []

        # emission parameters
        for hstate, pstate_label in product(range(self.n_hidden_states), pstates):
            for feature, distr in zip(self.emission_name, self.emission_distr):
                for feature_param in _DISTRIBUTIONS[distr]:
                    params.append(self.emission[feature][feature_param][self.e[pstate_label], hstate])

        # transition parameters, diagonals only assuming 2 state
        for hstate, pstate_label in product(range(self.n_hidden_states), pstates):
            params.append(self.transmat[self.e[pstate_label], self.e[pstate_label], hstate, hstate])

        return np.array(params)
|
vmonaco/pohmm | pohmm/pohmm.py | Pohmm.score_events | python | def score_events(self, obs, pstates):
pstates_idx = np.array([self.e[p] for p in pstates])
framelogprob = self._compute_log_likelihood(obs, pstates_idx)
_, fwdlattice = self._do_forward_pass(framelogprob, pstates_idx)
L = logsumexp(fwdlattice, axis=1)
return np.concatenate([L[[0]], np.diff(L)]) | Compute the log probability of each event under the model. | train | https://github.com/vmonaco/pohmm/blob/c00f8a62d3005a171d424549a55d46c421859ae9/pohmm/pohmm.py#L742-L751 | [
"def logsumexp(arr, axis=0):\n \"\"\"\n Computes the sum of arr assuming arr is in the log domain.\n\n Returns log(sum(exp(arr))) while minimizing the possibility of\n over/underflow.\n \"\"\"\n arr = np.rollaxis(arr, axis)\n # Use the max to normalize, as with the log this is what accumulates\n # the less errors\n vmax = arr.max(axis=0)\n out = np.log(np.sum(np.exp(arr - vmax), axis=0))\n out += vmax\n return out\n",
"def _compute_log_likelihood(self, obs, pstates_idx):\n\n q = np.zeros(shape=(len(obs), self.n_hidden_states, self.n_features))\n\n for col, (feature_name, feature_distr) in enumerate(zip(self.emission_name, self.emission_distr)):\n if feature_distr == 'normal':\n mu = self.emission[feature_name]['mu'][pstates_idx]\n sigma = self.emission[feature_name]['sigma'][pstates_idx]\n for j in range(self.n_hidden_states):\n q[:, j, col] = np.log(\n np.maximum(MIN_PROBA, stats.norm.pdf(obs[:, col], loc=mu[:, j], scale=sigma[:, j])))\n if feature_distr == 'lognormal':\n logmu = self.emission[feature_name]['logmu'][pstates_idx]\n logsigma = self.emission[feature_name]['logsigma'][pstates_idx]\n for j in range(self.n_hidden_states):\n q[:, j, col] = np.log(np.maximum(MIN_PROBA,\n stats.lognorm.pdf(obs[:, col], logsigma[:, j], loc=0,\n scale=np.exp(logmu[:, j]))))\n\n q = q.sum(axis=2)\n return q\n",
"def _do_forward_pass(self, framelogprob, event_idx):\n n_observations, n_hidden_states = framelogprob.shape\n fwdlattice = np.zeros((n_observations, n_hidden_states))\n _hmmc._forward(n_observations, n_hidden_states,\n event_idx, self._log_startprob,\n self._log_transmat, framelogprob, fwdlattice)\n\n return logsumexp(fwdlattice[-1]), fwdlattice\n"
] | class Pohmm(object):
"""
Partially observable hidden Markov model
"""
    def __init__(self,
                 n_hidden_states=2,
                 emissions=['normal'],
                 max_iter=1000,
                 thresh=1e-6,
                 init_method='obs',
                 init_spread=2,
                 smoothing=None,
                 random_state=None):
        """
        Configure a partially observable HMM.

        Parameters
        ----------
        n_hidden_states : int
            Number of hidden states.
        emissions : list of str, or list of (name, distribution) tuples
            Emission feature spec; each distribution must be a key of
            _DISTRIBUTIONS ('normal' or 'lognormal' per the code below).
        max_iter : int
            Maximum number of EM iterations in fit().
        thresh : float
            Log-likelihood improvement threshold for EM convergence.
        init_method : str
            Parameter initialization strategy; must be in _INIT_METHODS.
        init_spread : number >= 0
            Spread of initial emission means used by obs-based init.
        smoothing : None, str, or dict
            Smoothing strategy, per parameter group when a dict.
        random_state : int, RandomState, or None
            Seed/state for all stochastic operations.
        """
        # NOTE(review): mutable default for `emissions`; harmless here since
        # it is never mutated, but a tuple default would be safer.
        if type(n_hidden_states) is int:
            self.n_hidden_states = n_hidden_states
        else:
            raise Exception('Wrong type for n_hidden_states. Must be int')

        # NOTE(review): if emissions[0] is neither tuple nor str,
        # emission_name/emission_distr are never bound (NameError below).
        if type(emissions[0]) is tuple:
            emission_name, emission_distr = zip(*emissions)
        elif type(emissions[0]) is str:
            # Unnamed features get integer names
            emission_name, emission_distr = np.arange(len(emissions)), emissions

        for distr in emission_distr:
            if distr not in _DISTRIBUTIONS.keys():
                raise ValueError('Emission distribution must be one of', _DISTRIBUTIONS.keys())

        self.emission_name = emission_name
        self.emission_distr = emission_distr
        self.emission_name_distr = dict(zip(emission_name, emission_distr))
        self.n_features = len(emissions)

        # Set up the emission parameters
        # emission: {'feature':{'param': np.array(shape=(n_partial_states, n_hidden_states))}}
        self.emission = defaultdict(dict)
        for name, distr in zip(self.emission_name, self.emission_distr):
            for param in _DISTRIBUTIONS[distr]:
                self.emission[name][param] = None
        self.emission = dict(self.emission)

        assert max_iter >= 0
        assert thresh >= 0
        self.max_iter = max_iter
        self.thresh = thresh

        assert init_spread >= 0
        self.init_spread = init_spread

        if init_method not in _INIT_METHODS:
            raise ValueError('init_method must be one of', _INIT_METHODS)
        self.init_method = init_method

        # Normalize the smoothing spec into a per-parameter dict
        if smoothing is None:
            smoothing = {'transmat': None, 'startprob': None}
            for name, distr in zip(self.emission_name, self.emission_distr):
                for param in _DISTRIBUTIONS[distr]:
                    smoothing['%s:%s' % (name, param)] = None
        elif type(smoothing) is str:
            # Same strategy everywhere
            s = smoothing
            smoothing = {'transmat': s, 'startprob': s}
            for name, distr in zip(self.emission_name, self.emission_distr):
                for param in _DISTRIBUTIONS[distr]:
                    smoothing['%s:%s' % (name, param)] = s
        elif type(smoothing) is dict:
            assert 'transmat' in smoothing.keys()
            assert 'startprob' in smoothing.keys()
            for name, distr in zip(self.emission_name, self.emission_distr):
                for param in _DISTRIBUTIONS[distr]:
                    assert param in smoothing.keys() or '%s:%s' % (name, param) in smoothing.keys()
                    # Expand bare param keys to feature-qualified keys
                    if param in smoothing.keys() and '%s:%s' % (name, param) not in smoothing.keys():
                        smoothing['%s:%s' % (name, param)] = smoothing[param]
        else:
            raise Exception('Wrong type for smoothing. Must be None, str, or dict')
        self.smoothing = smoothing

        self.random_state = random_state

        # Number of unique partial states is unknown until fit
        self.n_partial_states = None

        # Results after fitting the model
        self.logprob_ = None
        self.n_iter_performed_ = None
        self.logprob_delta_ = None

        # Mapping between p-states and a unique index
        # Defaults to 0 for unknown or missing p-states
        self.e = defaultdict(int)
def _get_startprob(self):
return np.exp(self._log_startprob)
def _set_startprob(self, startprob):
if startprob is None:
startprob = np.ones(shape=(self.n_partial_states, self.n_hidden_states)) / self.n_hidden_states
else:
startprob = np.asarray(startprob, dtype=np.float)
# check if there exists a component whose value is exactly zero
# if so, add a small number and re-normalize
if not np.alltrue(startprob):
startprob = normalize(startprob, axis=1)
if len(startprob) != self.n_partial_states:
raise ValueError('startprob must have length n_partial_states')
if not np.allclose(np.sum(startprob, axis=1), 1.0):
raise ValueError('startprob must sum to 1.0')
self._log_startprob = np.log(np.asarray(startprob).copy())
startprob = property(_get_startprob, _set_startprob)
def _get_steadyprob(self):
return np.exp(self._log_steadyprob)
def _set_steadyprob(self, steadyprob):
if steadyprob is None:
steadyprob = np.ones(shape=(self.n_partial_states, self.n_hidden_states)) / self.n_hidden_states
else:
steadyprob = np.asarray(steadyprob, dtype=np.float)
# check if there exists a component whose value is exactly zero
# if so, add a small number and re-normalize
if not np.alltrue(steadyprob):
steadyprob = normalize(steadyprob, axis=1)
if len(steadyprob) != self.n_partial_states:
raise ValueError('steadyprob must have length n_partial_states')
if not np.allclose(np.sum(steadyprob, axis=1), 1.0):
raise ValueError('steadyprob must sum to 1.0')
self._log_steadyprob = np.log(np.asarray(steadyprob).copy())
steadyprob = property(_get_steadyprob, _set_steadyprob)
def _get_transmat(self):
return np.exp(self._log_transmat)
def _set_transmat(self, transmat):
if transmat is None:
transmat = np.ones(shape=(self.n_partial_states, self.n_partial_states, self.n_hidden_states,
self.n_hidden_states)) / self.n_hidden_states
# check if there exists a component whose value is exactly zero
# if so, add a small number and re-normalize
if not np.alltrue(transmat):
transmat = normalize(transmat, axis=3)
if (np.asarray(transmat).shape
!= (self.n_partial_states, self.n_partial_states, self.n_hidden_states, self.n_hidden_states)):
raise ValueError('transmat must have shape '
'(n_partial_states,n_partial_states,n_hidden_states,n_hidden_states)')
if not np.all(np.allclose(np.sum(transmat, axis=3), 1.0)):
raise ValueError('Rows of transmat must sum to 1.0')
self._log_transmat = np.log(np.asarray(transmat).copy())
underflow_idx = np.isnan(self._log_transmat)
self._log_transmat[underflow_idx] = NEGINF
transmat = property(_get_transmat, _set_transmat)
    def _compute_log_likelihood(self, obs, pstates_idx):
        """
        Per-event emission log-likelihoods.

        Parameters
        ----------
        obs : ndarray, shape (n_events, n_features)
        pstates_idx : ndarray of int, shape (n_events,)
            Partial-state index of each event.

        Returns
        -------
        ndarray, shape (n_events, n_hidden_states)
            Feature log-densities summed per event/hidden-state pair
            (features treated as conditionally independent given the states).
        """
        q = np.zeros(shape=(len(obs), self.n_hidden_states, self.n_features))

        for col, (feature_name, feature_distr) in enumerate(zip(self.emission_name, self.emission_distr)):
            if feature_distr == 'normal':
                mu = self.emission[feature_name]['mu'][pstates_idx]
                sigma = self.emission[feature_name]['sigma'][pstates_idx]
                for j in range(self.n_hidden_states):
                    # Clamp densities at MIN_PROBA to avoid log(0)
                    q[:, j, col] = np.log(
                        np.maximum(MIN_PROBA, stats.norm.pdf(obs[:, col], loc=mu[:, j], scale=sigma[:, j])))
            if feature_distr == 'lognormal':
                logmu = self.emission[feature_name]['logmu'][pstates_idx]
                logsigma = self.emission[feature_name]['logsigma'][pstates_idx]
                for j in range(self.n_hidden_states):
                    q[:, j, col] = np.log(np.maximum(MIN_PROBA,
                                                     stats.lognorm.pdf(obs[:, col], logsigma[:, j], loc=0,
                                                                       scale=np.exp(logmu[:, j]))))

        q = q.sum(axis=2)
        return q
    def _generate_sample_from_state(self, hidden_state, pstates_idx, random_state=None):
        """
        Draw one observation vector for the given hidden/partial state pair.

        Returns an ndarray of shape (n_features,), sampling each feature from
        its configured emission distribution.
        """
        sample = np.zeros(self.n_features)

        for col, (feature_name, feature_distr) in enumerate(zip(self.emission_name, self.emission_distr)):
            if feature_distr == 'normal':
                mu = self.emission[feature_name]['mu'][pstates_idx][hidden_state]
                sigma = self.emission[feature_name]['sigma'][pstates_idx][hidden_state]
                sample[col] = stats.norm.rvs(loc=mu, scale=sigma, random_state=random_state)
            if feature_distr == 'lognormal':
                logmu = self.emission[feature_name]['logmu'][pstates_idx][hidden_state]
                logsigma = self.emission[feature_name]['logsigma'][pstates_idx][hidden_state]
                sample[col] = stats.lognorm.rvs(logsigma, loc=0, scale=np.exp(logmu), random_state=random_state)

        return sample
def _init_pstates(self, unique_pstates):
# Map events to a unique index. The unknown p-state is at idx 0
self.e = defaultdict(int)
self.e.update(dict(zip(np.sort(unique_pstates), range(1, len(unique_pstates) + 1))))
self.er = {v: k for k, v in self.e.items()} # Reverse lookup
self.er[0] = UNKNOWN_PSTATE
self.n_partial_states = len(self.e.keys()) + 1 # Add one for the unknown state
return
    def _init_pstate_freqs(self, pstates_idx):
        """
        Accumulate partial-state frequencies and transition counts, then
        normalize the (previously zero-initialized) pstate_startprob,
        pstate_transmat and pstate_steadyprob arrays into probabilities.

        NOTE(review): assumes pstate_startprob/pstate_transmat/
        pstate_steadyprob were pre-allocated by _init_from_obs or
        _init_random — confirm call order.
        """
        # Partial state frequencies
        self.pstate_freq = Counter([idx for seq in pstates_idx for idx in seq])
        self.pstate_trans_freq = Counter([(idx1, idx2) for seq in pstates_idx for idx1, idx2 in zip(seq[:-1], seq[1:])])

        # Store freqs for the meta state
        self.pstate_freq[0] = len(np.concatenate(pstates_idx))

        for seq in pstates_idx:
            self.pstate_startprob[seq[0]] += 1
            self.pstate_steadyprob += np.bincount(seq, minlength=self.n_partial_states)
            for idx1, idx2 in zip(seq[:-1], seq[1:]):
                # Marginal (meta-state, idx 0) transition counts
                self.pstate_trans_freq[(0, 0)] += 1
                self.pstate_trans_freq[(idx1, 0)] += 1
                self.pstate_trans_freq[(0, idx2)] += 1
                self.pstate_transmat[idx1, idx2] += 1

        # TODO: separate probas from freqs
        # Normalize to get the probabilities, ignore the meta state at idx 0
        self.pstate_startprob[1:] = normalize(self.pstate_startprob[1:])
        self.pstate_transmat[1:, 1:] = normalize(self.pstate_transmat[1:, 1:], axis=1)
        self.pstate_steadyprob[1:] = normalize(self.pstate_steadyprob[1:])

        return
    def _init_from_obs(self, obs, pstates_idx):
        """
        Initialize model parameters from the observations.

        Starting/transition probabilities are set uniform; emission means are
        centered on the per-pstate sample mean, with the means of the first
        feature spread out by ``init_spread`` standard deviations to break
        symmetry between hidden states.
        """
        # Partial state probabilities (filled in by _init_pstate_freqs)
        self.pstate_startprob = np.zeros(self.n_partial_states)
        self.pstate_transmat = np.zeros((self.n_partial_states, self.n_partial_states))
        self.pstate_steadyprob = np.zeros(self.n_partial_states)

        # obs should be (N*T, n_features)
        # N is the number of samples
        # T is the size of each sample
        obs = np.concatenate(obs)
        pstates_idx = np.concatenate(pstates_idx)

        # Initialize starting and transition probas
        self.startprob = np.ones(shape=(self.n_partial_states, self.n_hidden_states)) / self.n_hidden_states
        self.steadyprob = np.ones(shape=(self.n_partial_states, self.n_hidden_states)) / self.n_hidden_states
        self.transmat = np.ones(shape=(self.n_partial_states, self.n_partial_states, self.n_hidden_states,
                                       self.n_hidden_states)) / self.n_hidden_states

        # Initialize emission parameters
        # Hidden states are ordered by the first feature
        feature1 = self.emission_name[0]
        for col, (feature_name, feature_distr) in enumerate(zip(self.emission_name, self.emission_distr)):
            if feature_distr == 'normal':
                self.emission[feature_name]['mu'] = np.zeros(shape=(self.n_partial_states, self.n_hidden_states))
                self.emission[feature_name]['sigma'] = np.zeros(shape=(self.n_partial_states, self.n_hidden_states))
                for idx in range(1, self.n_partial_states):
                    idx_pstate = (pstates_idx == idx)
                    # Fall back to all events if this pstate never occurs
                    if not np.any(idx_pstate):
                        idx_pstate = np.arange(len(pstates_idx))
                    if feature_name == feature1:
                        self.emission[feature_name]['sigma'][idx, :] = np.maximum(obs[idx_pstate, col].std(), MIN_PROBA)
                        # Spread the means to differentiate the hidden states
                        self.emission[feature_name]['mu'][idx] = obs[idx_pstate, col].mean() + \
                                                                 obs[:, col].std() * np.linspace(
                            -self.init_spread, self.init_spread,
                            self.n_hidden_states)
                    else:
                        self.emission[feature_name]['sigma'][idx, :] = np.maximum(obs[idx_pstate, col].std(), MIN_PROBA)
                        self.emission[feature_name]['mu'][idx, :] = obs[idx_pstate, col].mean()
            if feature_distr == 'lognormal':
                # Same scheme, in log space
                self.emission[feature_name]['logmu'] = np.zeros(shape=(self.n_partial_states, self.n_hidden_states))
                self.emission[feature_name]['logsigma'] = np.zeros(shape=(self.n_partial_states, self.n_hidden_states))
                for idx in range(1, self.n_partial_states):
                    idx_pstate = (pstates_idx == idx)
                    if not np.any(idx_pstate):
                        idx_pstate = np.arange(len(pstates_idx))
                    if feature_name == feature1:
                        self.emission[feature_name]['logsigma'][idx, :] = np.maximum(np.log(obs[idx_pstate, col]).std(),
                                                                                     MIN_PROBA)
                        self.emission[feature_name]['logmu'][idx] = np.log(obs[idx_pstate, col]).mean() + np.log(
                            obs[:, col]).std() * np.linspace(-self.init_spread, self.init_spread,
                                                             self.n_hidden_states)
                    else:
                        self.emission[feature_name]['logsigma'][idx, :] = np.maximum(np.log(obs[idx_pstate, col]).std(),
                                                                                     MIN_PROBA)
                        self.emission[feature_name]['logmu'][idx] = np.log(obs[idx_pstate, col]).mean()

        return
    def _init_random(self, random_state=None):
        """
        Initialize all parameters randomly.

        Partial-state and hidden-state stochastic matrices are drawn with
        gen_stochastic_matrix; emission parameters are drawn uniformly from
        the per-distribution ranges in _RANDINIT. The first feature's means
        are sorted so hidden state 0 carries the smallest mean.
        """
        if random_state is None:
            random_state = self.random_state
        random_state = check_random_state(random_state)

        # Partial-state chain (idx 0 is the unknown/meta state, left at zero)
        self.pstate_startprob = np.zeros(self.n_partial_states)
        self.pstate_transmat = np.zeros((self.n_partial_states, self.n_partial_states))
        self.pstate_steadyprob = np.zeros(self.n_partial_states)

        self.pstate_startprob[1:] = gen_stochastic_matrix(size=self.n_partial_states - 1, random_state=random_state)
        self.pstate_transmat[1:, 1:] = gen_stochastic_matrix(
            size=(self.n_partial_states - 1, self.n_partial_states - 1), random_state=random_state)
        self.pstate_steadyprob[1:] = steadystate(self.pstate_transmat[1:, 1:])

        self.startprob = gen_stochastic_matrix(size=(self.n_partial_states, self.n_hidden_states),
                                               random_state=random_state)

        # Hidden-state transitions per (pstate, pstate) pair
        transmat = np.zeros((self.n_partial_states, self.n_partial_states, self.n_hidden_states, self.n_hidden_states))
        for i, j in product(range(1, self.n_partial_states), range(1, self.n_partial_states)):
            transmat[i, j] = gen_stochastic_matrix(size=(self.n_hidden_states, self.n_hidden_states),
                                                   random_state=random_state)
        self.transmat = normalize(transmat, axis=3)

        # Initialize emission parameters
        for feature_name, feature_distr in zip(self.emission_name, self.emission_distr):
            if feature_distr == 'normal':
                self.emission[feature_name]['mu'] = random_state.uniform(*_RANDINIT[feature_distr]['mu'], size=(
                    self.n_partial_states, self.n_hidden_states))
                self.emission[feature_name]['sigma'] = random_state.uniform(*_RANDINIT[feature_distr]['sigma'], size=(
                    self.n_partial_states, self.n_hidden_states))
            if feature_distr == 'lognormal':
                self.emission[feature_name]['logmu'] = random_state.uniform(*_RANDINIT[feature_distr]['logmu'], size=(
                    self.n_partial_states, self.n_hidden_states))
                self.emission[feature_name]['logsigma'] = random_state.uniform(*_RANDINIT[feature_distr]['logsigma'],
                                                                               size=(self.n_partial_states,
                                                                                     self.n_hidden_states))

        # Order the hidden states by the first feature's mean
        if self.emission_distr[0] == 'normal':
            self.emission[self.emission_name[0]]['mu'] = np.sort(self.emission[self.emission_name[0]]['mu'], axis=1)
        elif self.emission_distr[0] == 'lognormal':
            self.emission[self.emission_name[0]]['logmu'] = np.sort(self.emission[self.emission_name[0]]['logmu'],
                                                                    axis=1)

        return
def _smooth(self):
    """Mix each p-state's parameters toward the marginal (index 0) parameters.

    The mixing weights are chosen per-parameter-group by the strategy named in
    ``self.smoothing`` ('freq', 'proba', 'exp', 'fixed', or None). Marginals
    are recomputed first so that index 0 reflects the current per-p-state
    parameters. Mutates ``startprob``, ``transmat`` and ``emission`` in place.
    """
    self._compute_marginals()
    # --- Smooth start probabilities: blend row j with the marginal row 0 ---
    startprob = self.startprob
    for j in range(1, self.n_partial_states):
        if 'freq' == self.smoothing['startprob']:
            # Marginal weight shrinks as the p-state is observed more often
            w_ = 1 / (1 + self.pstate_freq[j])
            w_j = 1 - w_
        elif 'proba' == self.smoothing['startprob']:
            w_j = self.pstate_steadyprob[j]
            w_ = 1 - w_j
        elif 'exp' == self.smoothing['startprob']:
            w_ = np.exp(-self.pstate_freq[j])
            w_j = 1 - w_
        elif 'fixed' == self.smoothing['startprob']:
            # Replace every row with the marginal
            w_ = 1
            w_j = 0
        elif self.smoothing['startprob'] is None:
            # No smoothing: keep the row as-is
            w_ = 0
            w_j = 1
        else:
            raise Exception('Wrong smoothing for startprob: ' + self.smoothing['startprob'])
        startprob[j] = w_j * self.startprob[j] + w_ * self.startprob[0]
    self.startprob = startprob
    # --- Smooth transition matrices: blend (i,j) with (i,0), (0,j), (0,0) ---
    transmat = self.transmat
    for i, j in product(range(1, self.n_partial_states), range(1, self.n_partial_states)):
        if 'freq' == self.smoothing['transmat']:
            w_i0 = 1 / (1 + self.pstate_trans_freq[i, j] + self.pstate_trans_freq[0, j])
            w_0j = 1 / (1 + self.pstate_trans_freq[i, j] + self.pstate_trans_freq[i, 0])
            w_ij = 1 - (w_i0 + w_0j)
            w_ = 0
        elif 'proba' == self.smoothing['transmat']:
            denom = self.pstate_transmat[i, j] + self.pstate_transmat[i, :].sum() + self.pstate_transmat[:, j].sum()
            w_i0 = self.pstate_transmat[i, :].sum() / denom
            w_0j = self.pstate_transmat[:, j].sum() / denom
            w_ij = self.pstate_transmat[i, j] / denom
            w_ = 0
        elif 'exp' == self.smoothing['transmat']:
            w_i0 = self.pstate_trans_freq[i, 0] * np.exp(-self.pstate_trans_freq[i, j])
            w_0j = self.pstate_trans_freq[0, j] * np.exp(-self.pstate_trans_freq[i, j])
            w_ = self.pstate_trans_freq[0, 0] * np.exp(
                -(self.pstate_trans_freq[i, 0] + self.pstate_trans_freq[0, j]))
            w_ij = self.pstate_trans_freq[i, j]
            w_ij, w_i0, w_0j, w_ = normalize(np.array([w_ij, w_i0, w_0j, w_]))
        elif 'fixed' == self.smoothing['transmat']:
            w_i0 = 0
            w_0j = 0
            w_ = 1
            w_ij = 0
        elif self.smoothing['transmat'] is None:
            w_i0 = 0
            w_0j = 0
            w_ = 0
            w_ij = 1
        else:
            raise Exception('Wrong smoothing for transmat: ' + self.smoothing['transmat'])
        # NOTE(review): one-sided check — it also passes when the weights sum
        # to well below 1; an abs() around the difference would be stricter.
        assert (w_i0 + w_0j + w_ + w_ij) - 1 < TOLERANCE
        transmat[i, j] = w_ij * self.transmat[i, j] + w_i0 * self.transmat[i, 0] + w_0j * self.transmat[0, j] + w_ * \
                         self.transmat[0, 0]
    self.transmat = transmat
    # NOTE(review): these three sanity checks are also one-sided (see above).
    assert np.all(self.startprob.sum(axis=1) - 1 < TOLERANCE)
    assert np.all(self.steadyprob.sum(axis=1) - 1 < TOLERANCE)
    assert np.all(self.transmat.sum(axis=3) - 1 < TOLERANCE)
    # --- Smooth emission parameters, keyed per 'feature:param' ---
    for feature_name, feature_distr in zip(self.emission_name, self.emission_distr):
        for param in _DISTRIBUTIONS[feature_distr]:
            key = '%s:%s' % (feature_name, param)
            for j in range(1, self.n_partial_states):
                if 'freq' == self.smoothing[key]:
                    w_ = 1 / (1 + self.pstate_freq[j])
                    w_j = 1 - w_
                elif 'proba' == self.smoothing[key]:
                    w_j = self.pstate_steadyprob[j]
                    w_ = 1 - w_j
                elif 'exp' == self.smoothing[key]:
                    w_ = np.exp(-self.pstate_freq[j])
                    w_j = 1 - w_
                elif 'fixed' == self.smoothing[key]:
                    w_ = 1
                    w_j = 0
                elif self.smoothing[key] is None:
                    w_ = 0
                    w_j = 1
                else:
                    raise Exception('Wrong smoothing for ' + key)
                self.emission[feature_name][param][j] = w_j * self.emission[feature_name][param][j] + w_ * \
                                                        self.emission[feature_name][param][0]
    return
def _initialize_sufficient_statistics(self):
stats = {
'nobs': 0,
'post': np.zeros((self.n_partial_states, self.n_hidden_states)),
'obs': np.zeros((self.n_partial_states, self.n_hidden_states, self.n_features)),
'obs**2': np.zeros((self.n_partial_states, self.n_hidden_states, self.n_features)),
'lnobs': np.zeros((self.n_partial_states, self.n_hidden_states, self.n_features)),
'lnobs**2': np.zeros((self.n_partial_states, self.n_hidden_states, self.n_features)),
'start': np.zeros((self.n_partial_states, self.n_hidden_states)),
'steady': np.zeros((self.n_partial_states, self.n_hidden_states)),
'trans': np.zeros(
(self.n_partial_states, self.n_partial_states, self.n_hidden_states, self.n_hidden_states))
}
return stats
def _accumulate_sufficient_statistics(self, stats, obs, pstates_idx, framelogprob,
                                      posteriors, fwdlattice, bwdlattice):
    """Accumulate E-step sufficient statistics for one observation sequence.

    Adds this sequence's contribution into ``stats`` (created by
    ``_initialize_sufficient_statistics``), using the forward/backward
    lattices and the per-frame posteriors.
    """
    stats['nobs'] += 1
    n_observations, n_hidden_states = framelogprob.shape
    # Overall start posterior (index 0), plus the posterior at the first
    # occurrence of each partial state within this sequence.
    stats['start'][0] += posteriors[0]
    for i in range(self.n_partial_states):
        if len(np.where(pstates_idx == i)[0]) > 0:
            stats['start'][i] += posteriors[np.where(pstates_idx == i)[0].min()]
    if n_observations > 1:
        # Pairwise state posteriors (log space), computed by the C extension.
        lneta = np.zeros((n_observations - 1, n_hidden_states, n_hidden_states))
        _hmmc._compute_lneta(n_observations, n_hidden_states, pstates_idx, fwdlattice,
                             self._log_transmat, bwdlattice, framelogprob,
                             lneta)
        # Sum the transition posteriors separately per observed p-state pair.
        for i, j in unique_rows(np.c_[pstates_idx[:-1], pstates_idx[1:]]):
            if ((pstates_idx[:-1] == i) & (pstates_idx[1:] == j)).sum() > 0:
                stats['trans'][i, j] += np.exp(
                    logsumexp(lneta[(pstates_idx[:-1] == i) & (pstates_idx[1:] == j)], axis=0))
    for i in range(self.n_partial_states):
        # Posterior-weighted raw and log-space first/second moments per p-state.
        stats['post'][i] += posteriors[pstates_idx == i].sum(axis=0)
        stats['obs'][i] += np.dot(posteriors[pstates_idx == i].T, obs[pstates_idx == i])
        stats['obs**2'][i] += np.dot(posteriors[pstates_idx == i].T, obs[pstates_idx == i] ** 2)
        # NOTE(review): np.log here assumes strictly positive observations —
        # confirm that callers guarantee this for lognormal features.
        stats['lnobs'][i] += np.dot(posteriors[pstates_idx == i].T, np.log(obs[pstates_idx == i]))
        stats['lnobs**2'][i] += np.dot(posteriors[pstates_idx == i].T, np.log(obs[pstates_idx == i]) ** 2)
    return
def _compute_marginals(self):
    """Recompute the marginal (index 0) parameters from per-p-state parameters.

    Builds the full (p-state x h-state) transition matrix via ``ph2full``,
    derives steady-state probabilities from it, then fills the row/column 0
    entries of ``startprob``, ``steadyprob``, ``transmat`` and the emission
    parameter tables with probability-weighted marginals.
    """
    # TODO: cythonize some of this
    # Start prob, weighted by p-state start probas
    self.startprob[0] = (self.pstate_startprob[1:, np.newaxis] * self.startprob[1:]).sum(axis=0)
    # Use the p-state transmat and transmat to get the full transmat
    full_transmat = ph2full(self.pstate_transmat[1:, 1:], self.transmat[1:, 1:])
    full_steadyprob = steadystate(full_transmat)
    # Steady state probas are determined by the full trans mat, need to be updated
    steadyprob = np.zeros(shape=(self.n_partial_states, self.n_hidden_states))
    steadyprob[0] = full_steadyprob.reshape(-1, self.n_hidden_states).sum(axis=0)
    for i in range(self.n_partial_states - 1):
        # Slice out the block of full steady-state probas belonging to p-state i+1
        steadyprob[i + 1] = normalize(
            full_steadyprob[i * self.n_hidden_states:i * self.n_hidden_states + self.n_hidden_states])
    self.steadyprob = steadyprob
    # Update the transitions to/from the marginal state
    # NOTE(review): `transmat` aliases self.transmat, so the assignments below
    # also mutate it in place before the final normalized re-assignment.
    transmat = self.transmat
    # Group the hidden states within each partial state
    for hidx1, hidx2 in product(range(self.n_hidden_states), range(self.n_hidden_states)):
        transmat[0, 0][hidx1, hidx2] = full_transmat[hidx1::self.n_hidden_states, hidx2::self.n_hidden_states].sum()
        for pidx in range(self.n_partial_states - 1):
            transmat[pidx + 1, 0][hidx1, hidx2] = full_transmat[pidx * self.n_hidden_states + hidx1,
                                                                hidx2::self.n_hidden_states].sum()
            transmat[0, pidx + 1][hidx1, hidx2] = full_transmat[hidx1::self.n_hidden_states,
                                                                pidx * self.n_hidden_states + hidx2].sum()
    self.transmat = normalize(transmat, axis=3)
    pweights = self.pstate_steadyprob[1:, np.newaxis]
    # Update emission parameters
    for feature_name, feature_distr in zip(self.emission_name, self.emission_distr):
        if feature_distr == 'normal':
            # Marginal state is a mixture of normals
            mu = self.emission[feature_name]['mu'][1:, :]
            sigma = self.emission[feature_name]['sigma'][1:, :]
            # Weighted mean; mixture variance = E[var] + var of the means
            mu_0 = (pweights * mu).sum(axis=0)
            self.emission[feature_name]['mu'][0, :] = mu_0
            self.emission[feature_name]['sigma'][0, :] = np.sqrt(
                (pweights * ((mu - mu_0) ** 2 + sigma ** 2)).sum(axis=0))
        if feature_distr == 'lognormal':
            # Same mixture formulas applied to the log-space parameters
            mu = self.emission[feature_name]['logmu'][1:, :]
            sigma = self.emission[feature_name]['logsigma'][1:, :]
            # Weighted mean and var
            mu_0 = (pweights * mu).sum(axis=0)
            self.emission[feature_name]['logmu'][0, :] = mu_0
            self.emission[feature_name]['logsigma'][0, :] = np.sqrt(
                (pweights * ((mu - mu_0) ** 2 + sigma ** 2)).sum(axis=0))
    return
def _do_mstep(self, stats):
    """M-step: re-estimate parameters from accumulated sufficient statistics.

    Start and transition probabilities are floored at MIN_PROBA and
    renormalized; normal/lognormal emission parameters are re-estimated by
    posterior-weighted moment matching.
    """
    self.startprob = normalize(np.maximum(stats['start'], MIN_PROBA), axis=1)
    self.transmat = normalize(np.maximum(stats['trans'], MIN_PROBA), axis=3)
    for col, (feature_name, feature_distr) in enumerate(zip(self.emission_name, self.emission_distr)):
        if feature_distr == 'normal':
            denom = np.maximum(stats['post'], MIN_PROBA)
            # Posterior-weighted mean
            mu = stats['obs'][:, :, col] / denom
            # Weighted sum of squared deviations: E[x^2] - 2*mu*E[x] + mu^2
            cv_num = (stats['obs**2'][:, :, col]
                      - 2 * mu * stats['obs'][:, :, col]
                      + mu ** 2 * denom)
            sigma = np.sqrt(cv_num / denom)
            # Guard against NaNs from tiny negative numerators / zero mass
            sigma[np.isnan(sigma)] = MIN_PROBA
            self.emission[feature_name]['mu'] = mu
            self.emission[feature_name]['sigma'] = np.maximum(sigma, MIN_PROBA)
        if feature_distr == 'lognormal':
            # Same moment matching applied to log-space statistics
            denom = np.maximum(stats['post'], MIN_PROBA)
            mu = stats['lnobs'][:, :, col] / denom
            cv_num = (stats['lnobs**2'][:, :, col]
                      - 2 * mu * stats['lnobs'][:, :, col]
                      + mu ** 2 * denom)
            sigma = np.sqrt(cv_num / denom)
            sigma[np.isnan(sigma)] = MIN_PROBA
            self.emission[feature_name]['logmu'] = mu
            self.emission[feature_name]['logsigma'] = np.maximum(sigma, MIN_PROBA)
    return
def _do_forward_pass(self, framelogprob, event_idx):
    """Run the forward algorithm (in the C extension).

    Returns the total sequence log-likelihood and the forward lattice.
    """
    n_obs, n_hidden = framelogprob.shape
    lattice = np.zeros((n_obs, n_hidden))
    _hmmc._forward(n_obs, n_hidden, event_idx, self._log_startprob,
                   self._log_transmat, framelogprob, lattice)
    # Total likelihood is the log-sum of the final forward column
    return logsumexp(lattice[-1]), lattice
def _do_backward_pass(self, framelogprob, event_idx):
    """Run the backward algorithm (in the C extension); return the lattice."""
    n_obs, n_hidden = framelogprob.shape
    lattice = np.zeros((n_obs, n_hidden))
    _hmmc._backward(n_obs, n_hidden, event_idx, self._log_startprob,
                    self._log_transmat, framelogprob, lattice)
    return lattice
def _do_viterbi_pass(self, framelogprob, event_idx):
    """Viterbi decoding (in the C extension).

    Returns the best-path log probability and the decoded state sequence.
    """
    n_obs, n_components = framelogprob.shape
    best_path, best_logprob = _hmmc._viterbi(n_obs, n_components, event_idx,
                                             self._log_startprob,
                                             self._log_transmat, framelogprob)
    return best_logprob, best_path
def rand(self, unique_pstates, random_state=None):
    """
    Randomize the POHMM parameters.

    Initializes the p-state index mapping from ``unique_pstates``, draws
    random model parameters, and recomputes the marginal (index 0)
    parameters. Returns self for chaining.
    """
    self._init_pstates(unique_pstates)
    self._init_random(random_state=random_state)
    self._compute_marginals()
    return self
def fit(self, obs, pstates, unique_pstates=None):
    """
    Estimate model parameters by expectation-maximization.

    Parameters:
        obs: list of 2d observation sequences (one row per event).
        pstates: list of 1d partial-state sequences, aligned with ``obs``.
        unique_pstates: optional explicit p-state vocabulary; inferred
            from ``pstates`` when omitted.

    Returns self. After fitting, ``logprob_``, ``n_iter_performed_`` and
    ``logprob_delta_`` describe the final EM iteration.
    """
    obs = [np.array(o) for o in obs]
    pstates = [np.array(p) for p in pstates]
    # One p-state sequence per observation sequence
    assert len(obs) == len(pstates)
    assert obs[0].ndim == 2
    assert pstates[0].ndim == 1
    if unique_pstates is not None:
        self._init_pstates(unique_pstates)
    else:
        self._init_pstates(list(set(np.concatenate(pstates))))
    # Map the partial states to a unique index
    pstates_idx = [np.array([self.e[p] for p in seq]) for seq in pstates]
    if self.init_method == 'rand':
        self._init_random()
    elif self.init_method == 'obs':
        self._init_from_obs(obs, pstates_idx)
    self._init_pstate_freqs(pstates_idx)
    self._smooth()
    logprob = []
    for i in range(self.max_iter):
        # Expectation step: accumulate statistics over every sequence
        stats = self._initialize_sufficient_statistics()
        curr_logprob = 0
        for obs_i, pstates_idx_i in zip(obs, pstates_idx):
            framelogprob = self._compute_log_likelihood(obs_i, pstates_idx_i)
            lpr, fwdlattice = self._do_forward_pass(framelogprob, pstates_idx_i)
            bwdlattice = self._do_backward_pass(framelogprob, pstates_idx_i)
            # Per-frame posteriors from the normalized forward*backward product
            gamma = fwdlattice + bwdlattice
            posteriors = np.exp(gamma.T - logsumexp(gamma, axis=1)).T
            curr_logprob += lpr
            self._accumulate_sufficient_statistics(stats, obs_i,
                                                   pstates_idx_i, framelogprob, posteriors, fwdlattice, bwdlattice)
        logprob.append(curr_logprob)
        self.logprob_ = curr_logprob
        # Check for convergence.
        self.n_iter_performed_ = i
        if i > 0:
            # FIX: __init__ declares `logprob_delta_` but this method used to
            # assign only `logprob_delta`; set both for backward compatibility.
            self.logprob_delta = logprob[-1] - logprob[-2]
            self.logprob_delta_ = self.logprob_delta
            if self.logprob_delta < self.thresh:
                break
        # Maximization step
        self._do_mstep(stats)
        # Mix the parameters
        self._smooth()
    return self
def score(self, obs, pstates):
    """
    Compute the log probability of (obs, pstates) under the model.
    """
    idx = np.array([self.e[p] for p in pstates])
    loglik = self._compute_log_likelihood(obs, idx)
    total_logprob, _ = self._do_forward_pass(loglik, idx)
    return total_logprob
def predict_states(self, obs, pstates):
    """Viterbi-decode the hidden state sequence for (obs, pstates).

    Returns the Viterbi log probability and the decoded state sequence.
    """
    idx = np.array([self.e[p] for p in pstates])
    loglik = self._compute_log_likelihood(obs, idx)
    return self._do_viterbi_pass(loglik, idx)
def predict(self, obs, pstates, next_pstate=None):
    """
    Predict the next observation given the history and the next p-state.

    With an empty history the hidden-state distribution comes from the
    start probabilities; otherwise it is propagated from the last forward
    column through the transition matrix. Returns one expected value per
    emission feature.
    """
    assert len(obs) == len(pstates)
    pstates_idx = np.array([self.e[ei] for ei in pstates])
    next_pstate_idx = self.e[next_pstate]
    if len(obs) == 0:
        # No history, use the starting probas
        next_hstate_prob = self.startprob[next_pstate_idx]
    else:
        # With a history, determine the hidden state posteriors using
        # the last posteriors and transition matrix
        framelogprob = self._compute_log_likelihood(obs, pstates_idx)
        _, fwdlattice = self._do_forward_pass(framelogprob, pstates_idx)
        alpha_n = fwdlattice[-1]
        # Exponentiate the final forward column stably, then normalize
        alpha_n = np.exp(alpha_n - alpha_n.max(axis=0))
        alpha_n = alpha_n / alpha_n.sum()
        trans = self.transmat[pstates_idx[-1], next_pstate_idx]
        # next_hstate_prob[i] = sum_j alpha_n[j] * trans[j, i]
        next_hstate_prob = alpha_n.dot(trans)
    # FIX: the original one-sided check (sum - 1 < TOLERANCE) would pass
    # even for a badly under-normalized distribution; compare |sum - 1|.
    assert abs(next_hstate_prob.sum() - 1) < TOLERANCE
    # Make the prediction: expected value per feature under the predicted
    # hidden-state distribution.
    prediction = np.array(
        [self.expected_value(feature, pstate=next_pstate, hstate_prob=next_hstate_prob) for feature in
         self.emission_name])
    return prediction
def gen_pstates_idx(self, n, random_state=None):
    """Sample a length-``n`` p-state index sequence from the p-state chain.

    Falls back to the model's own ``random_state`` when none is given.
    """
    if random_state is None:
        random_state = self.random_state
    rng = check_random_state(random_state)
    start_cdf = np.cumsum(self.pstate_startprob)
    trans_cdf = np.cumsum(self.pstate_transmat, 1)
    # Draw the initial state by inverse-CDF sampling
    state = (start_cdf > rng.rand()).argmax()
    sequence = [state]
    # Walk the chain for the remaining steps
    for _ in range(1, n):
        state = (trans_cdf[state] > rng.rand()).argmax()
        sequence.append(state)
    return np.array(sequence, dtype=int)
def sample(self, pstates=None, n_obs=None, random_state=None):
    """Generate a sample sequence from the model.

    Either ``pstates`` (an explicit p-state sequence) or ``n_obs`` (a length,
    with p-states generated from the p-state chain) must be given, but not
    both. Returns (observations, p-states, hidden states).
    """
    random_state = check_random_state(random_state)
    if pstates is None and n_obs is None:
        raise Exception('Must provide either pstates or n_obs')
    if pstates is not None and n_obs is not None:
        raise Exception('Must provide either pstates or n_obs but not both')
    gen_pstates = False
    # NOTE(review): this draw is consumed even when pstates is provided (the
    # else-branch never uses it), which shifts the RNG stream.
    rand = random_state.rand()
    if pstates is None:
        # Generate the p-state sequence alongside the hidden states
        gen_pstates = True
        pstartprob_cdf = np.cumsum(self.pstate_startprob)
        ptransmat_cdf = np.cumsum(self.pstate_transmat, 1)
        # Initial pstate
        currpstate = (pstartprob_cdf > rand).argmax()
        pstates_idx = [currpstate]
        pstates = [self.er[currpstate]]
    else:
        n_obs = len(pstates)
        pstates_idx = np.array([self.e[p] for p in pstates])
    startprob_pdf = self.startprob[pstates_idx[0]]
    startprob_cdf = np.cumsum(startprob_pdf)
    transmat_cdf = np.cumsum(self.transmat[0, pstates_idx[0]], 1)
    # Initial hidden state
    rand = random_state.rand()
    currstate = (startprob_cdf > rand).argmax()
    hidden_states = [currstate]
    obs = [self._generate_sample_from_state(currstate, pstates_idx[0], random_state)]
    for i in range(1, n_obs):
        rand = random_state.rand()
        if gen_pstates:
            # Advance the p-state chain first, then the hidden chain
            currpstate = (ptransmat_cdf[currpstate] > rand).argmax()
            pstates_idx.append(currpstate)
            pstates.append(self.er[currpstate])
        transmat_cdf = np.cumsum(self.transmat[pstates_idx[i - 1], pstates_idx[i]], 1)
        rand = random_state.rand()
        currstate = (transmat_cdf[currstate] > rand).argmax()
        hidden_states.append(currstate)
        obs.append(self._generate_sample_from_state(currstate, pstates_idx[i], random_state))
    return np.array(obs), np.array(pstates), np.array(hidden_states, dtype=int)
def fit_df(self, dfs, pstate_col=PSTATE_COL):
    """
    Convenience function to fit a model from a list of dataframes
    """
    feature_cols = list(self.emission_name)
    obs_seqs, pstate_seqs = [], []
    for frame in dfs:
        non_pstate = frame[frame.columns.difference([pstate_col])]
        obs_seqs.append(non_pstate[feature_cols].values)
        pstate_seqs.append(frame[pstate_col].values)
    return self.fit(obs_seqs, pstate_seqs)
def score_df(self, df, pstate_col=PSTATE_COL):
    """
    Convenience function to score a single dataframe.
    """
    feature_cols = list(self.emission_name)
    non_pstate = df[df.columns.difference([pstate_col])]
    return self.score(non_pstate[feature_cols].values, df[pstate_col].values)
def score_events_df(self, df, pstate_col=PSTATE_COL, score_col='score'):
    """
    Convenience function: per-event scores, returned as a copy of ``df``
    with a new ``score_col`` column.
    """
    result = df.copy()
    feature_cols = list(self.emission_name)
    non_pstate = result[result.columns.difference([pstate_col])]
    result[score_col] = self.score_events(non_pstate[feature_cols].values,
                                          result[pstate_col].values)
    return result
def predict_states_df(self, df, pstate_col=PSTATE_COL, hstate_col=HSTATE_COL):
    """Viterbi-decode hidden states for a dataframe.

    Returns a copy of ``df`` with the decoded states in ``hstate_col``.
    """
    result = df.copy()
    feature_cols = list(self.emission_name)
    observations = result[result.columns.difference([pstate_col])][feature_cols].values
    _, result[hstate_col] = self.predict_states(observations, result[pstate_col].values)
    return result
def predict_df(self, df, next_pstate=None, pstate_col=PSTATE_COL):
    """Predict the next observation from a dataframe history."""
    feature_cols = list(self.emission_name)
    observations = df[df.columns.difference([pstate_col])][feature_cols].values
    return self.predict(observations, df[pstate_col].values, next_pstate)
def sample_df(self, pstates=None, n_obs=None, random_state=None, pstate_col=PSTATE_COL, hstate_col=HSTATE_COL):
    """
    Convenience function to generate samples from the model and create a
    dataframe with the p-state, hidden-state, and feature columns (either
    state column can be suppressed by passing None for its name).
    """
    # pandas is an optional dependency of the df convenience methods
    import pandas as pd
    obs, pstates, hstates = self.sample(pstates, n_obs, random_state)
    items = []
    if pstate_col is not None:
        items.append((pstate_col, pstates))
    if hstate_col is not None:
        items.append((hstate_col, hstates))
    items = items + [(self.emission_name[i], obs[:, i]) for i in range(self.n_features)]
    # FIX: DataFrame.from_items was deprecated in pandas 0.23 and removed in
    # 1.0. A plain dict preserves insertion order on Python 3.7+, and the
    # explicit column list pins the same column order as before.
    df = pd.DataFrame(dict(items), columns=[name for name, _ in items])
    return df
def __str__(self):
    """Return a human-readable dump of all model parameters.

    Lists transition, start, and steady-state probabilities plus emission
    parameters, once for the marginal state ('.') and once per p-state.
    """
    pstates = sorted(self.e.keys())
    sep = '-' * 80 + '\n'
    sep2 = '_' * 40 + '\n'
    out = 'POHMM\n'
    out += 'H-states: %d\n' % self.n_hidden_states
    out += 'P-states: (%d) %s\n' % (len(pstates), str(pstates))
    out += 'Emission: %s\n' % (self.emission_distr)
    out += sep
    out += 'Transition probas\n'
    out += sep2
    # '.' denotes the marginal (index 0) state throughout
    out += '. -> .\n%s\n' % str(self.transmat[0, 0])
    for pstate in pstates:
        out += sep2
        out += '%s -> .\n%s\n' % (pstate, str(self.transmat[self.e[pstate], 0]))
        out += sep2
        out += '. -> %s\n%s\n' % (pstate, str(self.transmat[0, self.e[pstate]]))
    for pstate1, pstate2 in product(pstates, pstates):
        out += sep2
        out += '%s -> %s\n%s\n' % (pstate1, pstate2, str(self.transmat[self.e[pstate1], self.e[pstate2]]))
    out += sep
    out += 'Starting probas\n'
    out += '.: %s\n' % str(self.startprob[0])
    for pstate in pstates:
        out += '%s: %s\n' % (pstate, str(self.startprob[self.e[pstate]]))
    out += sep
    out += 'Steady probas\n'
    out += '.: %s\n' % str(self.steadyprob[0])
    for pstate in pstates:
        out += '%s: %s\n' % (pstate, str(self.steadyprob[self.e[pstate]]))
    out += sep
    out += 'Emissions\n'
    for feature_name, feature_distr in zip(self.emission_name, self.emission_distr):
        out += sep2
        out += 'Feature %s: %s\n' % (feature_name, feature_distr)
        for param in _DISTRIBUTIONS[feature_distr]:
            out += '.: %s = %s\n' % (param, str(self.emission[feature_name][param][0]))
            for pstate in pstates:
                out += '%s: %s = %s\n' % (pstate, param, str(self.emission[feature_name][param][self.e[pstate]]))
    out += sep
    return out
def expected_value(self, feature=None, pstate=None, hstate=None, pstate_prob=None, hstate_prob=None):
    """
    Determine the joint maximum likelihood estimate.

    Computes the expected emission value for ``feature``, marginalizing over
    whichever of pstate/hstate is not fixed. Explicit probability vectors
    (``pstate_prob``/``hstate_prob``) may be supplied instead of fixed
    states, but not both for the same axis.
    """
    # Use the first feature by default
    if feature is None:
        feature = self.emission_name[0]
    # Will default to marginal pstate if pstate is unknown or None
    # NOTE(review): self.e is a defaultdict, so looking up an unknown pstate
    # both maps it to 0 and caches the key (see the TODO in params()); this
    # lookup also happens before the mutual-exclusivity checks below.
    pstate_idx = self.e[pstate]
    if pstate is not None and pstate_prob is not None:
        raise Exception('Must provide either pstate or pstate_proba but not both')
    if hstate is not None and hstate_prob is not None:
        raise Exception('Must provide either hstate or hstate_proba but not both')
    # Marginalize pstate using the steady state probas
    if pstate_prob is None:
        pstate_prob = self.pstate_steadyprob
    # Marginalize hstate using the steady state probas
    if hstate_prob is None:
        hstate_prob = self.steadyprob[pstate_idx]
    if pstate is None and hstate is None:
        # Marginalize both pstate and hstate
        w = (pstate_prob[:, np.newaxis] * hstate_prob).flatten()
        if self.emission_name_distr[feature] == 'lognormal':
            return np.sum(w * expected_lognormal(self.emission[feature]['logsigma'].flatten(),
                                                 self.emission[feature]['logmu'].flatten()))
        elif self.emission_name_distr[feature] == 'normal':
            return np.sum(w * expected_normal(self.emission[feature]['mu'].flatten(),
                                              self.emission[feature]['sigma'].flatten()))
    elif hstate is None:
        # Marginalize hstate
        if self.emission_name_distr[feature] == 'lognormal':
            return np.sum(hstate_prob * expected_lognormal(self.emission[feature]['logsigma'][pstate_idx, :],
                                                           self.emission[feature]['logmu'][pstate_idx, :]))
        elif self.emission_name_distr[feature] == 'normal':
            return np.sum(hstate_prob * expected_normal(self.emission[feature]['mu'][pstate_idx, :],
                                                        self.emission[feature]['sigma'][pstate_idx, :]))
    elif pstate is None:
        # Marginalize pstate
        if self.emission_name_distr[feature] == 'lognormal':
            return np.sum(pstate_prob * expected_lognormal(self.emission[feature]['logsigma'][:, hstate],
                                                           self.emission[feature]['logmu'][:, hstate]))
        elif self.emission_name_distr[feature] == 'normal':
            return np.sum(pstate_prob * expected_normal(self.emission[feature]['mu'][:, hstate],
                                                        self.emission[feature]['sigma'][:, hstate]))
    else:
        # Both states fixed: no marginalization needed
        if self.emission_name_distr[feature] == 'lognormal':
            return expected_lognormal(self.emission[feature]['logsigma'][pstate_idx, hstate],
                                      self.emission[feature]['logmu'][pstate_idx, hstate])
        elif self.emission_name_distr[feature] == 'normal':
            return expected_normal(self.emission[feature]['mu'][pstate_idx, hstate],
                                   self.emission[feature]['sigma'][pstate_idx, hstate])
    # Falls through (returning None) only for an unrecognized distribution
    return
def pdf_fn(self, feature=None, pstate=None, hstate=None, pstate_prob=None, hstate_prob=None):
    """Return a function that evaluates the (mixture-weighted) emission pdf.

    The returned callable accepts a scalar or array and computes the density
    of ``feature``, marginalizing over whichever of pstate/hstate is not
    fixed with the weights chosen below. NOTE(review): structure mirrors
    ``cdf_fn`` almost line-for-line — a shared helper would remove the
    duplication.
    """
    # Use the first feature by default
    if feature is None:
        feature = self.emission_name[0]
    # Will default to marginal pstate if pstate is unknown or None
    pstate_idx = self.e[pstate]
    if pstate is not None and pstate_prob is not None:
        raise Exception('Must provide either pstate or pstate_proba but not both')
    if hstate is not None and hstate_prob is not None:
        raise Exception('Must provide either hstate or hstate_proba but not both')
    # Marginalize pstate using the steady state probas
    if pstate_prob is None:
        pstate_prob = self.pstate_steadyprob
    # Marginalize hstate using the steady state probas
    if hstate_prob is None:
        hstate_prob = self.steadyprob[pstate_idx]
    # Each branch sets mixture weights `w` and a component-pdf closure `pdf`
    if pstate is None and hstate is None:
        # Marginalize both pstate and hstate
        w = (pstate_prob[:, np.newaxis] * hstate_prob).flatten()
        if self.emission_name_distr[feature] == 'lognormal':
            pdf = lambda x: stats.lognorm.pdf(x, self.emission[feature]['logsigma'].flatten(), 0,
                                              np.exp(self.emission[feature]['logmu'].flatten()))
        elif self.emission_name_distr[feature] == 'normal':
            pdf = lambda x: stats.norm.pdf(x, self.emission[feature]['mu'].flatten(),
                                           self.emission[feature]['sigma'].flatten())
    elif hstate is None:
        # Marginalize hstate
        w = hstate_prob
        if self.emission_name_distr[feature] == 'lognormal':
            pdf = lambda x: stats.lognorm.pdf(x, self.emission[feature]['logsigma'][pstate_idx, :], 0,
                                              np.exp(self.emission[feature]['logmu'][pstate_idx, :]))
        elif self.emission_name_distr[feature] == 'normal':
            pdf = lambda x: stats.norm.pdf(x, self.emission[feature]['mu'][pstate_idx, :],
                                           self.emission[feature]['sigma'][pstate_idx, :])
    elif pstate is None:
        # Marginalize pstate
        w = pstate_prob
        if self.emission_name_distr[feature] == 'lognormal':
            pdf = lambda x: stats.lognorm.pdf(x, self.emission[feature]['logsigma'][:, hstate], 0,
                                              np.exp(self.emission[feature]['logmu'][:, hstate]))
        elif self.emission_name_distr[feature] == 'normal':
            pdf = lambda x: stats.norm.pdf(x, self.emission[feature]['mu'][:, hstate],
                                           self.emission[feature]['sigma'][:, hstate])
    else:
        # Both states fixed: a single component, unit weight
        w = 1
        if self.emission_name_distr[feature] == 'lognormal':
            pdf = lambda x: stats.lognorm.pdf(x, self.emission[feature]['logsigma'][pstate_idx, hstate], 0,
                                              np.exp(self.emission[feature]['logmu'][pstate_idx, hstate]))
        elif self.emission_name_distr[feature] == 'normal':
            pdf = lambda x: stats.norm.pdf(x, self.emission[feature]['mu'][pstate_idx, hstate],
                                           self.emission[feature]['sigma'][pstate_idx, hstate])

    def fn(x):
        # Weighted mixture density, evaluated pointwise for array input
        if np.isscalar(x):
            p = np.sum(w * pdf(x))
        else:
            x = np.array(x)
            p = np.zeros(len(x))
            for i, xi in enumerate(x):
                p[i] = np.sum(w * pdf(xi))
        return p

    return fn
def cdf_fn(self, feature=None, pstate=None, hstate=None, pstate_prob=None, hstate_prob=None):
    """Return a function that evaluates the (mixture-weighted) emission cdf.

    Same weighting and marginalization rules as ``pdf_fn``, with the
    component cdf in place of the pdf.
    """
    # Use the first feature by default
    if feature is None:
        feature = self.emission_name[0]
    # Will default to marginal pstate if pstate is unknown or None
    pstate_idx = self.e[pstate]
    if pstate is not None and pstate_prob is not None:
        raise Exception('Must provide either pstate or pstate_proba but not both')
    if hstate is not None and hstate_prob is not None:
        raise Exception('Must provide either hstate or hstate_proba but not both')
    # Marginalize pstate using the steady state probas
    if pstate_prob is None:
        pstate_prob = self.pstate_steadyprob
    # Marginalize hstate using the steady state probas
    if hstate_prob is None:
        hstate_prob = self.steadyprob[pstate_idx]
    # Each branch sets mixture weights `w` and a component-cdf closure `cdf`
    if pstate is None and hstate is None:
        # Marginalize both pstate and hstate
        w = (pstate_prob[:, np.newaxis] * hstate_prob).flatten()
        if self.emission_name_distr[feature] == 'lognormal':
            cdf = lambda x: stats.lognorm.cdf(x, self.emission[feature]['logsigma'].flatten(), 0,
                                              np.exp(self.emission[feature]['logmu'].flatten()))
        elif self.emission_name_distr[feature] == 'normal':
            cdf = lambda x: stats.norm.cdf(x, self.emission[feature]['mu'].flatten(),
                                           self.emission[feature]['sigma'].flatten())
    elif hstate is None:
        # Marginalize hstate
        w = hstate_prob
        if self.emission_name_distr[feature] == 'lognormal':
            cdf = lambda x: stats.lognorm.cdf(x, self.emission[feature]['logsigma'][pstate_idx, :], 0,
                                              np.exp(self.emission[feature]['logmu'][pstate_idx, :]))
        elif self.emission_name_distr[feature] == 'normal':
            cdf = lambda x: stats.norm.cdf(x, self.emission[feature]['mu'][pstate_idx, :],
                                           self.emission[feature]['sigma'][pstate_idx, :])
    elif pstate is None:
        # Marginalize pstate
        w = pstate_prob
        if self.emission_name_distr[feature] == 'lognormal':
            cdf = lambda x: stats.lognorm.cdf(x, self.emission[feature]['logsigma'][:, hstate], 0,
                                              np.exp(self.emission[feature]['logmu'][:, hstate]))
        elif self.emission_name_distr[feature] == 'normal':
            cdf = lambda x: stats.norm.cdf(x, self.emission[feature]['mu'][:, hstate],
                                           self.emission[feature]['sigma'][:, hstate])
    else:
        # Both states fixed: a single component, unit weight
        w = 1
        if self.emission_name_distr[feature] == 'lognormal':
            cdf = lambda x: stats.lognorm.cdf(x, self.emission[feature]['logsigma'][pstate_idx, hstate], 0,
                                              np.exp(self.emission[feature]['logmu'][pstate_idx, hstate]))
        elif self.emission_name_distr[feature] == 'normal':
            cdf = lambda x: stats.norm.cdf(x, self.emission[feature]['mu'][pstate_idx, hstate],
                                           self.emission[feature]['sigma'][pstate_idx, hstate])

    def fn(x):
        # Weighted mixture cdf, evaluated pointwise for array input
        if np.isscalar(x):
            p = np.sum(w * cdf(x))
        else:
            x = np.array(x)
            p = np.zeros(len(x))
            for i, xi in enumerate(x):
                p[i] = np.sum(w * cdf(xi))
        return p

    return fn
def params(self, pstates=None):
    """Flatten selected model parameters into a 1-d array.

    Emission parameters come first (ordered by hidden state, then p-state,
    then feature/param), followed by the diagonal transition entries.
    """
    if pstates is None:
        # TODO: self.e caches any unknown value, maybe it shouldn't?
        pstates = [None] + sorted(set(self.er.values()))
    collected = []
    # Emission parameters
    for hstate, label in product(range(self.n_hidden_states), pstates):
        for feature, distr in zip(self.emission_name, self.emission_distr):
            for feature_param in _DISTRIBUTIONS[distr]:
                collected.append(self.emission[feature][feature_param][self.e[label], hstate])
    # Transition parameters, diagonals only assuming 2 states
    for hstate, label in product(range(self.n_hidden_states), pstates):
        collected.append(self.transmat[self.e[label], self.e[label], hstate, hstate])
    return np.array(collected)
|
vmonaco/pohmm | pohmm/pohmm.py | Pohmm.predict | python | def predict(self, obs, pstates, next_pstate=None):
assert len(obs) == len(pstates)
pstates_idx = np.array([self.e[ei] for ei in pstates])
next_pstate_idx = self.e[next_pstate]
if len(obs) == 0:
# No history, use the starting probas
next_hstate_prob = self.startprob[next_pstate_idx]
else:
# With a history, determine the hidden state posteriors using
# the last posteriors and transition matrix
framelogprob = self._compute_log_likelihood(obs, pstates_idx)
_, fwdlattice = self._do_forward_pass(framelogprob, pstates_idx)
next_hstate_prob = np.zeros(self.n_hidden_states)
alpha_n = fwdlattice[-1]
vmax = alpha_n.max(axis=0)
alpha_n = np.exp(alpha_n - vmax)
alpha_n = alpha_n / alpha_n.sum()
trans = self.transmat[pstates_idx[-1], next_pstate_idx]
for i in range(self.n_hidden_states):
next_hstate_prob[i] = np.sum([alpha_n[j] * trans[j, i] for j in range(self.n_hidden_states)])
assert next_hstate_prob.sum() - 1 < TOLERANCE
# Make the prediction
prediction = np.array(
[self.expected_value(feature, pstate=next_pstate, hstate_prob=next_hstate_prob) for feature in
self.emission_name])
# next_hstate = np.argmax(next_hstate_prob)
# prediction = np.array(
# [self.expected_value(feature, pstate=next_pstate, hstate=next_hstate) for feature in
# self.emission_name])
return prediction | Predict the next observation | train | https://github.com/vmonaco/pohmm/blob/c00f8a62d3005a171d424549a55d46c421859ae9/pohmm/pohmm.py#L759-L800 | [
"def _compute_log_likelihood(self, obs, pstates_idx):\n\n q = np.zeros(shape=(len(obs), self.n_hidden_states, self.n_features))\n\n for col, (feature_name, feature_distr) in enumerate(zip(self.emission_name, self.emission_distr)):\n if feature_distr == 'normal':\n mu = self.emission[feature_name]['mu'][pstates_idx]\n sigma = self.emission[feature_name]['sigma'][pstates_idx]\n for j in range(self.n_hidden_states):\n q[:, j, col] = np.log(\n np.maximum(MIN_PROBA, stats.norm.pdf(obs[:, col], loc=mu[:, j], scale=sigma[:, j])))\n if feature_distr == 'lognormal':\n logmu = self.emission[feature_name]['logmu'][pstates_idx]\n logsigma = self.emission[feature_name]['logsigma'][pstates_idx]\n for j in range(self.n_hidden_states):\n q[:, j, col] = np.log(np.maximum(MIN_PROBA,\n stats.lognorm.pdf(obs[:, col], logsigma[:, j], loc=0,\n scale=np.exp(logmu[:, j]))))\n\n q = q.sum(axis=2)\n return q\n",
"def _do_forward_pass(self, framelogprob, event_idx):\n n_observations, n_hidden_states = framelogprob.shape\n fwdlattice = np.zeros((n_observations, n_hidden_states))\n _hmmc._forward(n_observations, n_hidden_states,\n event_idx, self._log_startprob,\n self._log_transmat, framelogprob, fwdlattice)\n\n return logsumexp(fwdlattice[-1]), fwdlattice\n"
] | class Pohmm(object):
"""
Partially observable hidden Markov model
"""
def __init__(self,
             n_hidden_states=2,
             emissions=['normal'],
             max_iter=1000,
             thresh=1e-6,
             init_method='obs',
             init_spread=2,
             smoothing=None,
             random_state=None):
    """Configure a POHMM.

    emissions is either a list of distribution names ('normal'/'lognormal')
    or a list of (feature_name, distribution) tuples. smoothing may be
    None, a single strategy name applied everywhere, or a dict keyed by
    'transmat', 'startprob', and per-parameter keys.
    NOTE(review): the mutable default `emissions=['normal']` is only read
    here, never mutated, so the shared-default pitfall does not bite.
    """
    if type(n_hidden_states) is int:
        self.n_hidden_states = n_hidden_states
    else:
        raise Exception('Wrong type for n_hidden_states. Must be int')
    # Accept either (name, distr) tuples or bare distribution names;
    # bare names get integer feature names assigned by position.
    if type(emissions[0]) is tuple:
        emission_name, emission_distr = zip(*emissions)
    elif type(emissions[0]) is str:
        emission_name, emission_distr = np.arange(len(emissions)), emissions
    for distr in emission_distr:
        if distr not in _DISTRIBUTIONS.keys():
            raise ValueError('Emission distribution must be one of', _DISTRIBUTIONS.keys())
    self.emission_name = emission_name
    self.emission_distr = emission_distr
    self.emission_name_distr = dict(zip(emission_name, emission_distr))
    self.n_features = len(emissions)
    # Set up the emission parameters
    # emission: {'feature':{'param': np.array(shape=(n_partial_states, n_hidden_states))}}
    self.emission = defaultdict(dict)
    for name, distr in zip(self.emission_name, self.emission_distr):
        for param in _DISTRIBUTIONS[distr]:
            self.emission[name][param] = None
    self.emission = dict(self.emission)
    assert max_iter >= 0
    assert thresh >= 0
    self.max_iter = max_iter
    self.thresh = thresh
    assert init_spread >= 0
    self.init_spread = init_spread
    if init_method not in _INIT_METHODS:
        raise ValueError('init_method must be one of', _INIT_METHODS)
    self.init_method = init_method
    # Normalize the smoothing spec into a dict with one key per parameter group
    if smoothing is None:
        smoothing = {'transmat': None, 'startprob': None}
        for name, distr in zip(self.emission_name, self.emission_distr):
            for param in _DISTRIBUTIONS[distr]:
                smoothing['%s:%s' % (name, param)] = None
    elif type(smoothing) is str:
        # Single strategy name applied to every parameter group
        s = smoothing
        smoothing = {'transmat': s, 'startprob': s}
        for name, distr in zip(self.emission_name, self.emission_distr):
            for param in _DISTRIBUTIONS[distr]:
                smoothing['%s:%s' % (name, param)] = s
    elif type(smoothing) is dict:
        assert 'transmat' in smoothing.keys()
        assert 'startprob' in smoothing.keys()
        for name, distr in zip(self.emission_name, self.emission_distr):
            for param in _DISTRIBUTIONS[distr]:
                # A bare param key (e.g. 'mu') fans out to each feature's key
                assert param in smoothing.keys() or '%s:%s' % (name, param) in smoothing.keys()
                if param in smoothing.keys() and '%s:%s' % (name, param) not in smoothing.keys():
                    smoothing['%s:%s' % (name, param)] = smoothing[param]
    else:
        raise Exception('Wrong type for smoothing. Must be None, str, or dict')
    self.smoothing = smoothing
    self.random_state = random_state
    # Number of unique partial states is unknown until fit
    self.n_partial_states = None
    # Results after fitting the model
    self.logprob_ = None
    self.n_iter_performed_ = None
    self.logprob_delta_ = None
    # Mapping between p-states and a unique index
    # Defaults to 0 for unknown or missing p-states
    self.e = defaultdict(int)
def _get_startprob(self):
return np.exp(self._log_startprob)
def _set_startprob(self, startprob):
if startprob is None:
startprob = np.ones(shape=(self.n_partial_states, self.n_hidden_states)) / self.n_hidden_states
else:
startprob = np.asarray(startprob, dtype=np.float)
# check if there exists a component whose value is exactly zero
# if so, add a small number and re-normalize
if not np.alltrue(startprob):
startprob = normalize(startprob, axis=1)
if len(startprob) != self.n_partial_states:
raise ValueError('startprob must have length n_partial_states')
if not np.allclose(np.sum(startprob, axis=1), 1.0):
raise ValueError('startprob must sum to 1.0')
self._log_startprob = np.log(np.asarray(startprob).copy())
startprob = property(_get_startprob, _set_startprob)
def _get_steadyprob(self):
return np.exp(self._log_steadyprob)
def _set_steadyprob(self, steadyprob):
if steadyprob is None:
steadyprob = np.ones(shape=(self.n_partial_states, self.n_hidden_states)) / self.n_hidden_states
else:
steadyprob = np.asarray(steadyprob, dtype=np.float)
# check if there exists a component whose value is exactly zero
# if so, add a small number and re-normalize
if not np.alltrue(steadyprob):
steadyprob = normalize(steadyprob, axis=1)
if len(steadyprob) != self.n_partial_states:
raise ValueError('steadyprob must have length n_partial_states')
if not np.allclose(np.sum(steadyprob, axis=1), 1.0):
raise ValueError('steadyprob must sum to 1.0')
self._log_steadyprob = np.log(np.asarray(steadyprob).copy())
steadyprob = property(_get_steadyprob, _set_steadyprob)
def _get_transmat(self):
return np.exp(self._log_transmat)
def _set_transmat(self, transmat):
if transmat is None:
transmat = np.ones(shape=(self.n_partial_states, self.n_partial_states, self.n_hidden_states,
self.n_hidden_states)) / self.n_hidden_states
# check if there exists a component whose value is exactly zero
# if so, add a small number and re-normalize
if not np.alltrue(transmat):
transmat = normalize(transmat, axis=3)
if (np.asarray(transmat).shape
!= (self.n_partial_states, self.n_partial_states, self.n_hidden_states, self.n_hidden_states)):
raise ValueError('transmat must have shape '
'(n_partial_states,n_partial_states,n_hidden_states,n_hidden_states)')
if not np.all(np.allclose(np.sum(transmat, axis=3), 1.0)):
raise ValueError('Rows of transmat must sum to 1.0')
self._log_transmat = np.log(np.asarray(transmat).copy())
underflow_idx = np.isnan(self._log_transmat)
self._log_transmat[underflow_idx] = NEGINF
transmat = property(_get_transmat, _set_transmat)
    def _compute_log_likelihood(self, obs, pstates_idx):
        """Compute per-event emission log likelihoods.

        Returns an array of shape (len(obs), n_hidden_states) where entry
        (t, j) is the log likelihood of observation t under hidden state j,
        summed over all features (features are treated as independent).
        """
        # Per-feature log densities; collapsed over the feature axis at the end
        q = np.zeros(shape=(len(obs), self.n_hidden_states, self.n_features))
        for col, (feature_name, feature_distr) in enumerate(zip(self.emission_name, self.emission_distr)):
            if feature_distr == 'normal':
                # Select each event's parameters by its p-state index:
                # shape (len(obs), n_hidden_states)
                mu = self.emission[feature_name]['mu'][pstates_idx]
                sigma = self.emission[feature_name]['sigma'][pstates_idx]
                for j in range(self.n_hidden_states):
                    # Clamp the density at MIN_PROBA to avoid log(0)
                    q[:, j, col] = np.log(
                        np.maximum(MIN_PROBA, stats.norm.pdf(obs[:, col], loc=mu[:, j], scale=sigma[:, j])))
            if feature_distr == 'lognormal':
                logmu = self.emission[feature_name]['logmu'][pstates_idx]
                logsigma = self.emission[feature_name]['logsigma'][pstates_idx]
                for j in range(self.n_hidden_states):
                    # scipy lognorm: shape=logsigma, scale=exp(logmu)
                    q[:, j, col] = np.log(np.maximum(MIN_PROBA,
                                                     stats.lognorm.pdf(obs[:, col], logsigma[:, j], loc=0,
                                                                       scale=np.exp(logmu[:, j]))))
        # Independence across features: log likelihoods add
        q = q.sum(axis=2)
        return q
    def _generate_sample_from_state(self, hidden_state, pstates_idx, random_state=None):
        """Draw one observation vector (one value per feature) from the
        emission distribution of the given (p-state index, hidden state)."""
        sample = np.zeros(self.n_features)
        for col, (feature_name, feature_distr) in enumerate(zip(self.emission_name, self.emission_distr)):
            if feature_distr == 'normal':
                mu = self.emission[feature_name]['mu'][pstates_idx][hidden_state]
                sigma = self.emission[feature_name]['sigma'][pstates_idx][hidden_state]
                sample[col] = stats.norm.rvs(loc=mu, scale=sigma, random_state=random_state)
            if feature_distr == 'lognormal':
                logmu = self.emission[feature_name]['logmu'][pstates_idx][hidden_state]
                logsigma = self.emission[feature_name]['logsigma'][pstates_idx][hidden_state]
                # scipy lognorm: shape=logsigma, scale=exp(logmu)
                sample[col] = stats.lognorm.rvs(logsigma, loc=0, scale=np.exp(logmu), random_state=random_state)
        return sample
def _init_pstates(self, unique_pstates):
# Map events to a unique index. The unknown p-state is at idx 0
self.e = defaultdict(int)
self.e.update(dict(zip(np.sort(unique_pstates), range(1, len(unique_pstates) + 1))))
self.er = {v: k for k, v in self.e.items()} # Reverse lookup
self.er[0] = UNKNOWN_PSTATE
self.n_partial_states = len(self.e.keys()) + 1 # Add one for the unknown state
return
    def _init_pstate_freqs(self, pstates_idx):
        """Count p-state occurrences and transitions, then derive the empirical
        p-state start, transition, and steady-state probabilities.

        pstates_idx is a list of integer index sequences (one per observation
        sequence). Index 0 is the meta/unknown p-state and aggregates counts
        over all events.
        """
        # Partial state frequencies
        self.pstate_freq = Counter([idx for seq in pstates_idx for idx in seq])
        self.pstate_trans_freq = Counter([(idx1, idx2) for seq in pstates_idx for idx1, idx2 in zip(seq[:-1], seq[1:])])
        # Store freqs for the meta state
        self.pstate_freq[0] = len(np.concatenate(pstates_idx))
        for seq in pstates_idx:
            self.pstate_startprob[seq[0]] += 1
            self.pstate_steadyprob += np.bincount(seq, minlength=self.n_partial_states)
            for idx1, idx2 in zip(seq[:-1], seq[1:]):
                # Meta-state transition counts: any->any, out of idx1, into idx2
                self.pstate_trans_freq[(0, 0)] += 1
                self.pstate_trans_freq[(idx1, 0)] += 1
                self.pstate_trans_freq[(0, idx2)] += 1
                self.pstate_transmat[idx1, idx2] += 1
        # TODO: separate probas from freqs
        # Normalize to get the probabilities, ignore the meta state at idx 0
        self.pstate_startprob[1:] = normalize(self.pstate_startprob[1:])
        self.pstate_transmat[1:, 1:] = normalize(self.pstate_transmat[1:, 1:], axis=1)
        self.pstate_steadyprob[1:] = normalize(self.pstate_steadyprob[1:])
        return
    def _init_from_obs(self, obs, pstates_idx):
        """Initialize model parameters from the observations ('obs' init method).

        Start/steady/transition probabilities are set uniform; emission
        parameters are set from per-p-state sample moments. For the first
        feature only, the hidden-state means are spread over
        +/- init_spread standard deviations to break symmetry and order the
        hidden states.
        """
        # Partial state probabilities
        self.pstate_startprob = np.zeros(self.n_partial_states)
        self.pstate_transmat = np.zeros((self.n_partial_states, self.n_partial_states))
        self.pstate_steadyprob = np.zeros(self.n_partial_states)
        # obs should be (N*T, n_features)
        # N is the number of samples
        # T is the size of each sample
        obs = np.concatenate(obs)
        pstates_idx = np.concatenate(pstates_idx)
        # Initialize starting and transition probas
        self.startprob = np.ones(shape=(self.n_partial_states, self.n_hidden_states)) / self.n_hidden_states
        self.steadyprob = np.ones(shape=(self.n_partial_states, self.n_hidden_states)) / self.n_hidden_states
        self.transmat = np.ones(shape=(self.n_partial_states, self.n_partial_states, self.n_hidden_states,
                                       self.n_hidden_states)) / self.n_hidden_states
        # Initialize emission parameters
        # Hidden states are ordered by the first feature
        feature1 = self.emission_name[0]
        for col, (feature_name, feature_distr) in enumerate(zip(self.emission_name, self.emission_distr)):
            if feature_distr == 'normal':
                self.emission[feature_name]['mu'] = np.zeros(shape=(self.n_partial_states, self.n_hidden_states))
                self.emission[feature_name]['sigma'] = np.zeros(shape=(self.n_partial_states, self.n_hidden_states))
                for idx in range(1, self.n_partial_states):
                    idx_pstate = (pstates_idx == idx)
                    # Fall back to all observations when this p-state never occurs
                    if not np.any(idx_pstate):
                        idx_pstate = np.arange(len(pstates_idx))
                    if feature_name == feature1:
                        self.emission[feature_name]['sigma'][idx, :] = np.maximum(obs[idx_pstate, col].std(), MIN_PROBA)
                        # Spread means over +/- init_spread std devs to order hidden states
                        self.emission[feature_name]['mu'][idx] = obs[idx_pstate, col].mean() + \
                                                                 obs[:, col].std() * np.linspace(
                            -self.init_spread, self.init_spread,
                            self.n_hidden_states)
                    else:
                        self.emission[feature_name]['sigma'][idx, :] = np.maximum(obs[idx_pstate, col].std(), MIN_PROBA)
                        self.emission[feature_name]['mu'][idx, :] = obs[idx_pstate, col].mean()
            if feature_distr == 'lognormal':
                # Same scheme as 'normal', but moments are taken in log space
                self.emission[feature_name]['logmu'] = np.zeros(shape=(self.n_partial_states, self.n_hidden_states))
                self.emission[feature_name]['logsigma'] = np.zeros(shape=(self.n_partial_states, self.n_hidden_states))
                for idx in range(1, self.n_partial_states):
                    idx_pstate = (pstates_idx == idx)
                    if not np.any(idx_pstate):
                        idx_pstate = np.arange(len(pstates_idx))
                    if feature_name == feature1:
                        self.emission[feature_name]['logsigma'][idx, :] = np.maximum(np.log(obs[idx_pstate, col]).std(),
                                                                                     MIN_PROBA)
                        self.emission[feature_name]['logmu'][idx] = np.log(obs[idx_pstate, col]).mean() + np.log(
                            obs[:, col]).std() * np.linspace(-self.init_spread, self.init_spread,
                                                             self.n_hidden_states)
                    else:
                        self.emission[feature_name]['logsigma'][idx, :] = np.maximum(np.log(obs[idx_pstate, col]).std(),
                                                                                     MIN_PROBA)
                        self.emission[feature_name]['logmu'][idx] = np.log(obs[idx_pstate, col]).mean()
        return
    def _init_random(self, random_state=None):
        """Randomly initialize all model parameters ('rand' init method).

        P-state probabilities are drawn as random stochastic matrices
        (index 0, the meta state, stays zero here), transition matrices are
        random row-stochastic, and emission parameters are drawn uniformly
        from the ranges in _RANDINIT. The first feature's means are sorted so
        hidden states have a canonical order.
        """
        if random_state is None:
            random_state = self.random_state
        random_state = check_random_state(random_state)
        self.pstate_startprob = np.zeros(self.n_partial_states)
        self.pstate_transmat = np.zeros((self.n_partial_states, self.n_partial_states))
        self.pstate_steadyprob = np.zeros(self.n_partial_states)
        self.pstate_startprob[1:] = gen_stochastic_matrix(size=self.n_partial_states - 1, random_state=random_state)
        self.pstate_transmat[1:, 1:] = gen_stochastic_matrix(
            size=(self.n_partial_states - 1, self.n_partial_states - 1), random_state=random_state)
        # Steady-state distribution implied by the random p-state chain
        self.pstate_steadyprob[1:] = steadystate(self.pstate_transmat[1:, 1:])
        self.startprob = gen_stochastic_matrix(size=(self.n_partial_states, self.n_hidden_states),
                                               random_state=random_state)
        transmat = np.zeros((self.n_partial_states, self.n_partial_states, self.n_hidden_states, self.n_hidden_states))
        for i, j in product(range(1, self.n_partial_states), range(1, self.n_partial_states)):
            transmat[i, j] = gen_stochastic_matrix(size=(self.n_hidden_states, self.n_hidden_states),
                                                   random_state=random_state)
        self.transmat = normalize(transmat, axis=3)
        # Initialize emission parameters
        for feature_name, feature_distr in zip(self.emission_name, self.emission_distr):
            if feature_distr == 'normal':
                self.emission[feature_name]['mu'] = random_state.uniform(*_RANDINIT[feature_distr]['mu'], size=(
                    self.n_partial_states, self.n_hidden_states))
                self.emission[feature_name]['sigma'] = random_state.uniform(*_RANDINIT[feature_distr]['sigma'], size=(
                    self.n_partial_states, self.n_hidden_states))
            if feature_distr == 'lognormal':
                self.emission[feature_name]['logmu'] = random_state.uniform(*_RANDINIT[feature_distr]['logmu'], size=(
                    self.n_partial_states, self.n_hidden_states))
                self.emission[feature_name]['logsigma'] = random_state.uniform(*_RANDINIT[feature_distr]['logsigma'],
                                                                               size=(self.n_partial_states,
                                                                                     self.n_hidden_states))
        # Order hidden states by the first feature's (log-)mean
        if self.emission_distr[0] == 'normal':
            self.emission[self.emission_name[0]]['mu'] = np.sort(self.emission[self.emission_name[0]]['mu'], axis=1)
        elif self.emission_distr[0] == 'lognormal':
            self.emission[self.emission_name[0]]['logmu'] = np.sort(self.emission[self.emission_name[0]]['logmu'],
                                                                    axis=1)
        return
    def _smooth(self):
        """Shrink per-p-state parameters toward the marginal (meta) state.

        Each parameter (startprob rows, transmat blocks, emission params) is
        replaced by a convex combination of its own estimate and the marginal
        estimate at index 0, with weights chosen by the configured smoothing
        strategy: 'freq' (weight by observation count), 'proba' (weight by
        steady-state probability), 'exp' (exponentially vanishing pull toward
        the marginal), 'fixed' (marginal only), or None (no smoothing).
        """
        # Refresh the marginal (index-0) parameters first
        self._compute_marginals()
        startprob = self.startprob
        for j in range(1, self.n_partial_states):
            # w_j weights the p-state's own estimate, w_ the marginal estimate
            if 'freq' == self.smoothing['startprob']:
                w_ = 1 / (1 + self.pstate_freq[j])
                w_j = 1 - w_
            elif 'proba' == self.smoothing['startprob']:
                w_j = self.pstate_steadyprob[j]
                w_ = 1 - w_j
            elif 'exp' == self.smoothing['startprob']:
                w_ = np.exp(-self.pstate_freq[j])
                w_j = 1 - w_
            elif 'fixed' == self.smoothing['startprob']:
                w_ = 1
                w_j = 0
            elif self.smoothing['startprob'] is None:
                w_ = 0
                w_j = 1
            else:
                raise Exception('Wrong smoothing for startprob: ' + self.smoothing['startprob'])
            startprob[j] = w_j * self.startprob[j] + w_ * self.startprob[0]
        self.startprob = startprob
        transmat = self.transmat
        for i, j in product(range(1, self.n_partial_states), range(1, self.n_partial_states)):
            # Four components: the (i,j) block itself plus the three marginal
            # blocks (i,0), (0,j), and (0,0)
            if 'freq' == self.smoothing['transmat']:
                w_i0 = 1 / (1 + self.pstate_trans_freq[i, j] + self.pstate_trans_freq[0, j])
                w_0j = 1 / (1 + self.pstate_trans_freq[i, j] + self.pstate_trans_freq[i, 0])
                w_ij = 1 - (w_i0 + w_0j)
                w_ = 0
            elif 'proba' == self.smoothing['transmat']:
                denom = self.pstate_transmat[i, j] + self.pstate_transmat[i, :].sum() + self.pstate_transmat[:, j].sum()
                w_i0 = self.pstate_transmat[i, :].sum() / denom
                w_0j = self.pstate_transmat[:, j].sum() / denom
                w_ij = self.pstate_transmat[i, j] / denom
                w_ = 0
            elif 'exp' == self.smoothing['transmat']:
                w_i0 = self.pstate_trans_freq[i, 0] * np.exp(-self.pstate_trans_freq[i, j])
                w_0j = self.pstate_trans_freq[0, j] * np.exp(-self.pstate_trans_freq[i, j])
                w_ = self.pstate_trans_freq[0, 0] * np.exp(
                    -(self.pstate_trans_freq[i, 0] + self.pstate_trans_freq[0, j]))
                w_ij = self.pstate_trans_freq[i, j]
                w_ij, w_i0, w_0j, w_ = normalize(np.array([w_ij, w_i0, w_0j, w_]))
            elif 'fixed' == self.smoothing['transmat']:
                w_i0 = 0
                w_0j = 0
                w_ = 1
                w_ij = 0
            elif self.smoothing['transmat'] is None:
                w_i0 = 0
                w_0j = 0
                w_ = 0
                w_ij = 1
            else:
                raise Exception('Wrong smoothing for transmat: ' + self.smoothing['transmat'])
            # Weights must form a convex combination
            assert (w_i0 + w_0j + w_ + w_ij) - 1 < TOLERANCE
            transmat[i, j] = w_ij * self.transmat[i, j] + w_i0 * self.transmat[i, 0] + w_0j * self.transmat[0, j] + w_ * \
                             self.transmat[
                                 0, 0]
        self.transmat = transmat
        assert np.all(self.startprob.sum(axis=1) - 1 < TOLERANCE)
        assert np.all(self.steadyprob.sum(axis=1) - 1 < TOLERANCE)
        assert np.all(self.transmat.sum(axis=3) - 1 < TOLERANCE)
        # Smooth the emission parameters the same way as startprob,
        # per feature/parameter key 'feature:param'
        for feature_name, feature_distr in zip(self.emission_name, self.emission_distr):
            for param in _DISTRIBUTIONS[feature_distr]:
                key = '%s:%s' % (feature_name, param)
                for j in range(1, self.n_partial_states):
                    if 'freq' == self.smoothing[key]:
                        w_ = 1 / (1 + self.pstate_freq[j])
                        w_j = 1 - w_
                    elif 'proba' == self.smoothing[key]:
                        w_j = self.pstate_steadyprob[j]
                        w_ = 1 - w_j
                    elif 'exp' == self.smoothing[key]:
                        w_ = np.exp(-self.pstate_freq[j])
                        w_j = 1 - w_
                    elif 'fixed' == self.smoothing[key]:
                        w_ = 1
                        w_j = 0
                    elif self.smoothing[key] is None:
                        w_ = 0
                        w_j = 1
                    else:
                        raise Exception('Wrong smoothing for ' + key)
                    self.emission[feature_name][param][j] = w_j * self.emission[feature_name][param][j] + w_ * \
                                                            self.emission[
                                                                feature_name][
                                                                param][0]
        return
def _initialize_sufficient_statistics(self):
stats = {
'nobs': 0,
'post': np.zeros((self.n_partial_states, self.n_hidden_states)),
'obs': np.zeros((self.n_partial_states, self.n_hidden_states, self.n_features)),
'obs**2': np.zeros((self.n_partial_states, self.n_hidden_states, self.n_features)),
'lnobs': np.zeros((self.n_partial_states, self.n_hidden_states, self.n_features)),
'lnobs**2': np.zeros((self.n_partial_states, self.n_hidden_states, self.n_features)),
'start': np.zeros((self.n_partial_states, self.n_hidden_states)),
'steady': np.zeros((self.n_partial_states, self.n_hidden_states)),
'trans': np.zeros(
(self.n_partial_states, self.n_partial_states, self.n_hidden_states, self.n_hidden_states))
}
return stats
    def _accumulate_sufficient_statistics(self, stats, obs, pstates_idx, framelogprob,
                                          posteriors, fwdlattice, bwdlattice):
        """Accumulate E-step statistics from one observation sequence into stats.

        Adds posterior-weighted start counts, transition counts (via the
        compiled lneta computation), and posterior-weighted observation
        moments per p-state.

        NOTE(review): stats['steady'] is initialized elsewhere but not
        updated here — confirm whether that is intentional.
        """
        stats['nobs'] += 1
        n_observations, n_hidden_states = framelogprob.shape
        # Meta state (index 0) always counts the first event as a start
        stats['start'][0] += posteriors[0]
        for i in range(self.n_partial_states):
            # The first occurrence of p-state i acts as its start event
            if len(np.where(pstates_idx == i)[0]) > 0:
                stats['start'][i] += posteriors[np.where(pstates_idx == i)[0].min()]
        if n_observations > 1:
            # lneta[t, a, b]: log joint posterior of hidden states (a, b) at steps (t, t+1)
            lneta = np.zeros((n_observations - 1, n_hidden_states, n_hidden_states))
            _hmmc._compute_lneta(n_observations, n_hidden_states, pstates_idx, fwdlattice,
                                 self._log_transmat, bwdlattice, framelogprob,
                                 lneta)
            # Accumulate expected transition counts per observed p-state pair
            for i, j in unique_rows(np.c_[pstates_idx[:-1], pstates_idx[1:]]):
                if ((pstates_idx[:-1] == i) & (pstates_idx[1:] == j)).sum() > 0:
                    stats['trans'][i, j] += np.exp(
                        logsumexp(lneta[(pstates_idx[:-1] == i) & (pstates_idx[1:] == j)], axis=0))
        # Posterior-weighted moments of the observations (and their logs) per p-state
        for i in range(self.n_partial_states):
            stats['post'][i] += posteriors[pstates_idx == i].sum(axis=0)
            stats['obs'][i] += np.dot(posteriors[pstates_idx == i].T, obs[pstates_idx == i])
            stats['obs**2'][i] += np.dot(posteriors[pstates_idx == i].T, obs[pstates_idx == i] ** 2)
            stats['lnobs'][i] += np.dot(posteriors[pstates_idx == i].T, np.log(obs[pstates_idx == i]))
            stats['lnobs**2'][i] += np.dot(posteriors[pstates_idx == i].T, np.log(obs[pstates_idx == i]) ** 2)
        return
    def _compute_marginals(self):
        """Recompute the marginal (index-0) parameters from the per-p-state ones.

        The meta state's start probas, steady-state probas, transition blocks,
        and emission parameters are derived by marginalizing the per-p-state
        parameters with the p-state probabilities as weights.
        """
        # TODO: cythonize some of this
        # Start prob, weighted by p-state start probs
        self.startprob[0] = (self.pstate_startprob[1:, np.newaxis] * self.startprob[1:]).sum(axis=0)
        # Use the p-state transmat and transmat to get the full transmat
        full_transmat = ph2full(self.pstate_transmat[1:, 1:], self.transmat[1:, 1:])
        full_steadyprob = steadystate(full_transmat)
        # Steady state probas are determined by the full trans mat, need to be updated
        steadyprob = np.zeros(shape=(self.n_partial_states, self.n_hidden_states))
        steadyprob[0] = full_steadyprob.reshape(-1, self.n_hidden_states).sum(axis=0)
        for i in range(self.n_partial_states - 1):
            # Re-normalize the slice of the full chain belonging to p-state i+1
            steadyprob[i + 1] = normalize(
                full_steadyprob[i * self.n_hidden_states:i * self.n_hidden_states + self.n_hidden_states])
        self.steadyprob = steadyprob
        # Update the transations to/from the marginal state
        transmat = self.transmat
        # Group the hidden states within each partial state
        for hidx1, hidx2 in product(range(self.n_hidden_states), range(self.n_hidden_states)):
            transmat[0, 0][hidx1, hidx2] = full_transmat[hidx1::self.n_hidden_states, hidx2::self.n_hidden_states].sum()
            for pidx in range(self.n_partial_states - 1):
                transmat[pidx + 1, 0][hidx1, hidx2] = full_transmat[pidx * self.n_hidden_states + hidx1,
                                                                    hidx2::self.n_hidden_states].sum()
                transmat[0, pidx + 1][hidx1, hidx2] = full_transmat[hidx1::self.n_hidden_states,
                                                                    pidx * self.n_hidden_states + hidx2].sum()
        self.transmat = normalize(transmat, axis=3)
        # Weight each p-state by its steady-state probability
        pweights = self.pstate_steadyprob[1:, np.newaxis]
        # Update emission parameters
        for feature_name, feature_distr in zip(self.emission_name, self.emission_distr):
            if feature_distr == 'normal':
                # Marginal state is a mixture of normals
                mu = self.emission[feature_name]['mu'][1:, :]
                sigma = self.emission[feature_name]['sigma'][1:, :]
                # Weighted mean and var (law of total variance for the mixture)
                mu_0 = (pweights * mu).sum(axis=0)
                self.emission[feature_name]['mu'][0, :] = mu_0
                self.emission[feature_name]['sigma'][0, :] = np.sqrt(
                    (pweights * ((mu - mu_0) ** 2 + sigma ** 2)).sum(axis=0))
            if feature_distr == 'lognormal':
                # Marginal state is a mixture of normals (in log space)
                mu = self.emission[feature_name]['logmu'][1:, :]
                sigma = self.emission[feature_name]['logsigma'][1:, :]
                # Weighted mean and var
                mu_0 = (pweights * mu).sum(axis=0)
                self.emission[feature_name]['logmu'][0, :] = mu_0
                self.emission[feature_name]['logsigma'][0, :] = np.sqrt(
                    (pweights * ((mu - mu_0) ** 2 + sigma ** 2)).sum(axis=0))
        return
    def _do_mstep(self, stats):
        """M-step: re-estimate parameters from the accumulated sufficient statistics.

        Start and transition probabilities are normalized counts (floored at
        MIN_PROBA); emission means/stds are posterior-weighted sample moments.
        """
        self.startprob = normalize(np.maximum(stats['start'], MIN_PROBA), axis=1)
        self.transmat = normalize(np.maximum(stats['trans'], MIN_PROBA), axis=3)
        for col, (feature_name, feature_distr) in enumerate(zip(self.emission_name, self.emission_distr)):
            if feature_distr == 'normal':
                denom = np.maximum(stats['post'], MIN_PROBA)
                mu = stats['obs'][:, :, col] / denom
                # Weighted sum of squared deviations: E[x^2] - 2*mu*E[x] + mu^2
                cv_num = (stats['obs**2'][:, :, col]
                          - 2 * mu * stats['obs'][:, :, col]
                          + mu ** 2 * denom)
                sigma = np.sqrt(cv_num / denom)
                # Guard against degenerate (NaN) variances
                sigma[np.isnan(sigma)] = MIN_PROBA
                self.emission[feature_name]['mu'] = mu
                self.emission[feature_name]['sigma'] = np.maximum(sigma, MIN_PROBA)
            if feature_distr == 'lognormal':
                # Same as 'normal' but on the log-observation moments
                denom = np.maximum(stats['post'], MIN_PROBA)
                mu = stats['lnobs'][:, :, col] / denom
                cv_num = (stats['lnobs**2'][:, :, col]
                          - 2 * mu * stats['lnobs'][:, :, col]
                          + mu ** 2 * denom)
                sigma = np.sqrt(cv_num / denom)
                sigma[np.isnan(sigma)] = MIN_PROBA
                self.emission[feature_name]['logmu'] = mu
                self.emission[feature_name]['logsigma'] = np.maximum(sigma, MIN_PROBA)
        return
    def _do_forward_pass(self, framelogprob, event_idx):
        """Run the forward algorithm (log space) via the compiled _hmmc helper.

        Returns (sequence log likelihood, forward lattice of shape
        (n_observations, n_hidden_states)).
        """
        n_observations, n_hidden_states = framelogprob.shape
        fwdlattice = np.zeros((n_observations, n_hidden_states))
        _hmmc._forward(n_observations, n_hidden_states,
                       event_idx, self._log_startprob,
                       self._log_transmat, framelogprob, fwdlattice)
        # Total log likelihood = logsumexp over the final-step states
        return logsumexp(fwdlattice[-1]), fwdlattice
    def _do_backward_pass(self, framelogprob, event_idx):
        """Run the backward algorithm (log space) via the compiled _hmmc helper.

        Returns the backward lattice of shape (n_observations, n_hidden_states).
        """
        n_observations, n_hidden_states = framelogprob.shape
        bwdlattice = np.zeros((n_observations, n_hidden_states))
        _hmmc._backward(n_observations, n_hidden_states,
                        event_idx, self._log_startprob,
                        self._log_transmat, framelogprob, bwdlattice)
        return bwdlattice
    def _do_viterbi_pass(self, framelogprob, event_idx):
        """Run the Viterbi algorithm via the compiled _hmmc helper.

        Returns (log probability of the best path, hidden state sequence).
        """
        n_observations, n_components = framelogprob.shape
        state_sequence, logprob = _hmmc._viterbi(
            n_observations, n_components,
            event_idx, self._log_startprob,
            self._log_transmat, framelogprob)
        return logprob, state_sequence
    def rand(self, unique_pstates, random_state=None):
        """
        Randomize the POHMM parameters.

        Builds the p-state index maps from unique_pstates, draws random
        parameters, and recomputes the marginal (index-0) state. Returns self.
        """
        self._init_pstates(unique_pstates)
        self._init_random(random_state=random_state)
        self._compute_marginals()
        return self
def fit(self, obs, pstates, unique_pstates=None):
"""
Estimate model parameters.
"""
obs = [np.array(o) for o in obs]
pstates = [np.array(p) for p in pstates]
# List or array of observation sequences
assert len(obs) == len(pstates)
assert obs[0].ndim == 2
assert pstates[0].ndim == 1
if unique_pstates is not None:
self._init_pstates(unique_pstates)
else:
self._init_pstates(list(set(np.concatenate(pstates))))
# Map the partial states to a unique index
pstates_idx = [np.array([self.e[p] for p in seq]) for seq in pstates]
if self.init_method == 'rand':
self._init_random()
elif self.init_method == 'obs':
self._init_from_obs(obs, pstates_idx)
self._init_pstate_freqs(pstates_idx)
self._smooth()
logprob = []
for i in range(self.max_iter):
# Expectation step
stats = self._initialize_sufficient_statistics()
curr_logprob = 0
for obs_i, pstates_idx_i in zip(obs, pstates_idx):
framelogprob = self._compute_log_likelihood(obs_i, pstates_idx_i)
lpr, fwdlattice = self._do_forward_pass(framelogprob, pstates_idx_i)
bwdlattice = self._do_backward_pass(framelogprob, pstates_idx_i)
gamma = fwdlattice + bwdlattice
posteriors = np.exp(gamma.T - logsumexp(gamma, axis=1)).T
curr_logprob += lpr
self._accumulate_sufficient_statistics(stats, obs_i,
pstates_idx_i, framelogprob, posteriors, fwdlattice, bwdlattice)
logprob.append(curr_logprob)
self.logprob_ = curr_logprob
# Check for convergence.
self.n_iter_performed_ = i
if i > 0:
self.logprob_delta = logprob[-1] - logprob[-2]
if self.logprob_delta < self.thresh:
break
# Maximization step
self._do_mstep(stats)
# Mix the parameters
self._smooth()
return self
def score(self, obs, pstates):
"""
Compute the log probability under the model.
"""
pstates_idx = np.array([self.e[p] for p in pstates])
framelogprob = self._compute_log_likelihood(obs, pstates_idx)
logprob, _ = self._do_forward_pass(framelogprob, pstates_idx)
return logprob
def score_events(self, obs, pstates):
"""
Compute the log probability of each event under the model.
"""
pstates_idx = np.array([self.e[p] for p in pstates])
framelogprob = self._compute_log_likelihood(obs, pstates_idx)
_, fwdlattice = self._do_forward_pass(framelogprob, pstates_idx)
L = logsumexp(fwdlattice, axis=1)
return np.concatenate([L[[0]], np.diff(L)])
def predict_states(self, obs, pstates):
pstates_idx = np.array([self.e[p] for p in pstates])
framelogprob = self._compute_log_likelihood(obs, pstates_idx)
viterbi_logprob, state_sequence = self._do_viterbi_pass(framelogprob, pstates_idx)
return viterbi_logprob, state_sequence
    def gen_pstates_idx(self, n, random_state=None):
        """Sample a length-n sequence of p-state indexes from the p-state
        Markov chain (pstate_startprob / pstate_transmat) by inverse-CDF
        sampling."""
        if random_state is None:
            random_state = self.random_state
        random_state = check_random_state(random_state)
        startprob_cdf = np.cumsum(self.pstate_startprob)
        transmat_cdf = np.cumsum(self.pstate_transmat, 1)
        # Initial state.
        rand = random_state.rand()
        curr_pstate = (startprob_cdf > rand).argmax()
        pstates = [curr_pstate]
        for _ in range(1, n):
            # Draw the next p-state from the row of the current one
            rand = random_state.rand()
            curr_pstate = (transmat_cdf[curr_pstate] > rand).argmax()
            pstates.append(curr_pstate)
        return np.array(pstates, dtype=int)
    def sample(self, pstates=None, n_obs=None, random_state=None):
        """Generate a sample sequence from the model.

        Provide either a sequence of p-state labels (pstates) or a length
        (n_obs, in which case p-states are sampled from the p-state chain),
        but not both. Returns (obs, pstates, hidden_states).
        """
        random_state = check_random_state(random_state)
        if pstates is None and n_obs is None:
            raise Exception('Must provide either pstates or n_obs')
        if pstates is not None and n_obs is not None:
            raise Exception('Must provide either pstates or n_obs but not both')
        gen_pstates = False
        # NOTE: this draw is consumed even when pstates is supplied (it is only
        # used in the gen_pstates branch); preserved for RNG-stream compatibility
        rand = random_state.rand()
        if pstates is None:
            gen_pstates = True
            pstartprob_cdf = np.cumsum(self.pstate_startprob)
            ptransmat_cdf = np.cumsum(self.pstate_transmat, 1)
            # Initial pstate
            currpstate = (pstartprob_cdf > rand).argmax()
            pstates_idx = [currpstate]
            pstates = [self.er[currpstate]]
        else:
            n_obs = len(pstates)
            pstates_idx = np.array([self.e[p] for p in pstates])
        startprob_pdf = self.startprob[pstates_idx[0]]
        startprob_cdf = np.cumsum(startprob_pdf)
        transmat_cdf = np.cumsum(self.transmat[0, pstates_idx[0]], 1)
        # Initial hidden state
        rand = random_state.rand()
        currstate = (startprob_cdf > rand).argmax()
        hidden_states = [currstate]
        obs = [self._generate_sample_from_state(currstate, pstates_idx[0], random_state)]
        for i in range(1, n_obs):
            rand = random_state.rand()
            if gen_pstates:
                # Extend the p-state sequence as we go
                currpstate = (ptransmat_cdf[currpstate] > rand).argmax()
                pstates_idx.append(currpstate)
                pstates.append(self.er[currpstate])
            # Hidden-state transition conditioned on the p-state pair
            transmat_cdf = np.cumsum(self.transmat[pstates_idx[i - 1], pstates_idx[i]], 1)
            rand = random_state.rand()
            currstate = (transmat_cdf[currstate] > rand).argmax()
            hidden_states.append(currstate)
            obs.append(self._generate_sample_from_state(currstate, pstates_idx[i], random_state))
        return np.array(obs), np.array(pstates), np.array(hidden_states, dtype=int)
def fit_df(self, dfs, pstate_col=PSTATE_COL):
"""
Convenience function to fit a model from a list of dataframes
"""
obs_cols = list(self.emission_name)
obs = [df[df.columns.difference([pstate_col])][obs_cols].values for df in dfs]
pstates = [df[pstate_col].values for df in dfs]
return self.fit(obs, pstates)
def score_df(self, df, pstate_col=PSTATE_COL):
"""
"""
obs_cols = list(self.emission_name)
obs = df[df.columns.difference([pstate_col])][obs_cols].values
pstates = df[pstate_col].values
return self.score(obs, pstates)
def score_events_df(self, df, pstate_col=PSTATE_COL, score_col='score'):
"""
"""
df = df.copy()
obs_cols = list(self.emission_name)
obs = df[df.columns.difference([pstate_col])][obs_cols].values
pstates = df[pstate_col].values
df[score_col] = self.score_events(obs, pstates)
return df
def predict_states_df(self, df, pstate_col=PSTATE_COL, hstate_col=HSTATE_COL):
df = df.copy()
obs_cols = list(self.emission_name)
obs = df[df.columns.difference([pstate_col])][obs_cols].values
pstates = df[pstate_col].values
_, df[hstate_col] = self.predict_states(obs, pstates)
return df
def predict_df(self, df, next_pstate=None, pstate_col=PSTATE_COL):
obs_cols = list(self.emission_name)
obs = df[df.columns.difference([pstate_col])][obs_cols].values
pstates = df[pstate_col].values
return self.predict(obs, pstates, next_pstate)
def sample_df(self, pstates=None, n_obs=None, random_state=None, pstate_col=PSTATE_COL, hstate_col=HSTATE_COL):
"""
Convenience function to generate samples a model and create a dataframe
"""
try:
import pandas as pd
except Exception as e:
raise e
obs, pstates, hstates = self.sample(pstates, n_obs, random_state)
items = []
if pstate_col is not None:
items.append((pstate_col, pstates))
if hstate_col is not None:
items.append((hstate_col, hstates))
items = items + [(self.emission_name[i], obs[:, i]) for i in range(self.n_features)]
df = pd.DataFrame.from_items(items)
return df
    def __str__(self):
        """Human-readable dump of all model parameters: transition blocks,
        start/steady probabilities, and per-feature emission parameters,
        with '.' denoting the marginal (unknown) p-state."""
        pstates = sorted(self.e.keys())
        sep = '-' * 80 + '\n'
        sep2 = '_' * 40 + '\n'
        out = 'POHMM\n'
        out += 'H-states: %d\n' % self.n_hidden_states
        out += 'P-states: (%d) %s\n' % (len(pstates), str(pstates))
        out += 'Emission: %s\n' % (self.emission_distr)
        out += sep
        out += 'Transition probas\n'
        out += sep2
        # Marginal-to-marginal block first, then each p-state's blocks to/from
        # the marginal, then every p-state pair
        out += '. -> .\n%s\n' % str(self.transmat[0, 0])
        for pstate in pstates:
            out += sep2
            out += '%s -> .\n%s\n' % (pstate, str(self.transmat[self.e[pstate], 0]))
            out += sep2
            out += '. -> %s\n%s\n' % (pstate, str(self.transmat[0, self.e[pstate]]))
        for pstate1, pstate2 in product(pstates, pstates):
            out += sep2
            out += '%s -> %s\n%s\n' % (pstate1, pstate2, str(self.transmat[self.e[pstate1], self.e[pstate2]]))
        out += sep
        out += 'Starting probas\n'
        out += '.: %s\n' % str(self.startprob[0])
        for pstate in pstates:
            out += '%s: %s\n' % (pstate, str(self.startprob[self.e[pstate]]))
        out += sep
        out += 'Steady probas\n'
        out += '.: %s\n' % str(self.steadyprob[0])
        for pstate in pstates:
            out += '%s: %s\n' % (pstate, str(self.steadyprob[self.e[pstate]]))
        out += sep
        out += 'Emissions\n'
        for feature_name, feature_distr in zip(self.emission_name, self.emission_distr):
            out += sep2
            out += 'Feature %s: %s\n' % (feature_name, feature_distr)
            for param in _DISTRIBUTIONS[feature_distr]:
                out += '.: %s = %s\n' % (param, str(self.emission[feature_name][param][0]))
                for pstate in pstates:
                    out += '%s: %s = %s\n' % (pstate, param, str(self.emission[feature_name][param][self.e[pstate]]))
        out += sep
        return out
    def expected_value(self, feature=None, pstate=None, hstate=None, pstate_prob=None, hstate_prob=None):
        """
        Determine the joint maximum likelihood estimate

        Returns the expected value of a feature, optionally conditioned on a
        p-state and/or hidden state. Unspecified states are marginalized with
        the supplied probabilities or, by default, the steady-state
        probabilities. Exactly one of (pstate, pstate_prob) and of
        (hstate, hstate_prob) may be given.
        """
        # Use the first feature by default
        if feature is None:
            feature = self.emission_name[0]
        # Will default to marginal pstate if pstate is unknown or None
        pstate_idx = self.e[pstate]
        if pstate is not None and pstate_prob is not None:
            raise Exception('Must provide either pstate or pstate_proba but not both')
        if hstate is not None and hstate_prob is not None:
            raise Exception('Must provide either hstate or hstate_proba but not both')
        # Marginalize pstate using the steady state probas
        if pstate_prob is None:
            pstate_prob = self.pstate_steadyprob
        # Marginalize hstate using the steady state probas
        if hstate_prob is None:
            hstate_prob = self.steadyprob[pstate_idx]
        if pstate is None and hstate is None:
            # Marginalize both pstate and hstate: joint weights over all
            # (p-state, hidden-state) pairs
            w = (pstate_prob[:, np.newaxis] * hstate_prob).flatten()
            if self.emission_name_distr[feature] == 'lognormal':
                return np.sum(w * expected_lognormal(self.emission[feature]['logsigma'].flatten(),
                                                     self.emission[feature]['logmu'].flatten()))
            elif self.emission_name_distr[feature] == 'normal':
                return np.sum(w * expected_normal(self.emission[feature]['mu'].flatten(),
                                                  self.emission[feature]['sigma'].flatten()))
        elif hstate is None:
            # Marginalize hstate only (pstate fixed)
            if self.emission_name_distr[feature] == 'lognormal':
                return np.sum(hstate_prob * expected_lognormal(self.emission[feature]['logsigma'][pstate_idx, :],
                                                               self.emission[feature]['logmu'][pstate_idx, :]))
            elif self.emission_name_distr[feature] == 'normal':
                return np.sum(hstate_prob * expected_normal(self.emission[feature]['mu'][pstate_idx, :],
                                                            self.emission[feature]['sigma'][pstate_idx, :]))
        elif pstate is None:
            # Marginalize pstate only (hstate fixed)
            if self.emission_name_distr[feature] == 'lognormal':
                return np.sum(pstate_prob * expected_lognormal(self.emission[feature]['logsigma'][:, hstate],
                                                               self.emission[feature]['logmu'][:, hstate]))
            elif self.emission_name_distr[feature] == 'normal':
                return np.sum(pstate_prob * expected_normal(self.emission[feature]['mu'][:, hstate],
                                                            self.emission[feature]['sigma'][:, hstate]))
        else:
            # Both states fixed: direct expectation
            if self.emission_name_distr[feature] == 'lognormal':
                return expected_lognormal(self.emission[feature]['logsigma'][pstate_idx, hstate],
                                          self.emission[feature]['logmu'][pstate_idx, hstate])
            elif self.emission_name_distr[feature] == 'normal':
                return expected_normal(self.emission[feature]['mu'][pstate_idx, hstate],
                                       self.emission[feature]['sigma'][pstate_idx, hstate])
        return
    def pdf_fn(self, feature=None, pstate=None, hstate=None, pstate_prob=None, hstate_prob=None):
        """Return a callable that evaluates the (possibly marginalized) emission pdf of `feature`.

        Each of pstate/hstate may be fixed, supplied as a probability vector
        (pstate_prob/hstate_prob), or left as None to marginalize using the
        steady-state probabilities. The returned function accepts a scalar or
        an array-like of points and returns the mixture density at each point.

        Raises:
            Exception: if both a state and its probability vector are given.
        """
        # Use the first feature by default
        if feature is None:
            feature = self.emission_name[0]
        # Will default to marginal pstate if pstate is unknown or None
        # (self.e is a defaultdict(int): unknown labels map to index 0)
        pstate_idx = self.e[pstate]
        if pstate is not None and pstate_prob is not None:
            raise Exception('Must provide either pstate or pstate_proba but not both')
        if hstate is not None and hstate_prob is not None:
            raise Exception('Must provide either hstate or hstate_proba but not both')
        # Marginalize pstate using the steady state probas
        if pstate_prob is None:
            pstate_prob = self.pstate_steadyprob
        # Marginalize hstate using the steady state probas
        if hstate_prob is None:
            hstate_prob = self.steadyprob[pstate_idx]
        if pstate is None and hstate is None:
            # Marginalize both pstate and hstate: mixture weights are the joint
            # (pstate, hstate) probabilities, flattened to align with the
            # flattened emission parameter arrays below.
            w = (pstate_prob[:, np.newaxis] * hstate_prob).flatten()
            if self.emission_name_distr[feature] == 'lognormal':
                pdf = lambda x: stats.lognorm.pdf(x, self.emission[feature]['logsigma'].flatten(), 0,
                                                  np.exp(self.emission[feature]['logmu'].flatten()))
            elif self.emission_name_distr[feature] == 'normal':
                pdf = lambda x: stats.norm.pdf(x, self.emission[feature]['mu'].flatten(),
                                               self.emission[feature]['sigma'].flatten())
        elif hstate is None:
            # Marginalize hstate
            w = hstate_prob
            if self.emission_name_distr[feature] == 'lognormal':
                pdf = lambda x: stats.lognorm.pdf(x, self.emission[feature]['logsigma'][pstate_idx, :], 0,
                                                  np.exp(self.emission[feature]['logmu'][pstate_idx, :]))
            elif self.emission_name_distr[feature] == 'normal':
                pdf = lambda x: stats.norm.pdf(x, self.emission[feature]['mu'][pstate_idx, :],
                                               self.emission[feature]['sigma'][pstate_idx, :])
        elif pstate is None:
            # Marginalize pstate
            w = pstate_prob
            if self.emission_name_distr[feature] == 'lognormal':
                pdf = lambda x: stats.lognorm.pdf(x, self.emission[feature]['logsigma'][:, hstate], 0,
                                                  np.exp(self.emission[feature]['logmu'][:, hstate]))
            elif self.emission_name_distr[feature] == 'normal':
                pdf = lambda x: stats.norm.pdf(x, self.emission[feature]['mu'][:, hstate],
                                               self.emission[feature]['sigma'][:, hstate])
        else:
            # Both states fixed: a single component with unit weight
            w = 1
            if self.emission_name_distr[feature] == 'lognormal':
                pdf = lambda x: stats.lognorm.pdf(x, self.emission[feature]['logsigma'][pstate_idx, hstate], 0,
                                                  np.exp(self.emission[feature]['logmu'][pstate_idx, hstate]))
            elif self.emission_name_distr[feature] == 'normal':
                pdf = lambda x: stats.norm.pdf(x, self.emission[feature]['mu'][pstate_idx, hstate],
                                               self.emission[feature]['sigma'][pstate_idx, hstate])
        def fn(x):
            # Mixture density: weighted sum of the component pdfs at x
            if np.isscalar(x):
                p = np.sum(w * pdf(x))
            else:
                x = np.array(x)
                p = np.zeros(len(x))
                for i, xi in enumerate(x):
                    p[i] = np.sum(w * pdf(xi))
            return p
        return fn
    def cdf_fn(self, feature=None, pstate=None, hstate=None, pstate_prob=None, hstate_prob=None):
        """Return a callable that evaluates the (possibly marginalized) emission cdf of `feature`.

        Mirrors pdf_fn: each of pstate/hstate may be fixed, supplied as a
        probability vector, or left as None to marginalize over the
        steady-state probabilities.

        Raises:
            Exception: if both a state and its probability vector are given.
        """
        # Use the first feature by default
        if feature is None:
            feature = self.emission_name[0]
        # Will default to marginal pstate if pstate is unknown or None
        # (self.e is a defaultdict(int): unknown labels map to index 0)
        pstate_idx = self.e[pstate]
        if pstate is not None and pstate_prob is not None:
            raise Exception('Must provide either pstate or pstate_proba but not both')
        if hstate is not None and hstate_prob is not None:
            raise Exception('Must provide either hstate or hstate_proba but not both')
        # Marginalize pstate using the steady state probas
        if pstate_prob is None:
            pstate_prob = self.pstate_steadyprob
        # Marginalize hstate using the steady state probas
        if hstate_prob is None:
            hstate_prob = self.steadyprob[pstate_idx]
        if pstate is None and hstate is None:
            # Marginalize both pstate and hstate: mixture weights are the joint
            # (pstate, hstate) probabilities, flattened to match the flattened
            # emission parameter arrays.
            w = (pstate_prob[:, np.newaxis] * hstate_prob).flatten()
            if self.emission_name_distr[feature] == 'lognormal':
                cdf = lambda x: stats.lognorm.cdf(x, self.emission[feature]['logsigma'].flatten(), 0,
                                                  np.exp(self.emission[feature]['logmu'].flatten()))
            elif self.emission_name_distr[feature] == 'normal':
                cdf = lambda x: stats.norm.cdf(x, self.emission[feature]['mu'].flatten(),
                                               self.emission[feature]['sigma'].flatten())
        elif hstate is None:
            # Marginalize hstate
            w = hstate_prob
            if self.emission_name_distr[feature] == 'lognormal':
                cdf = lambda x: stats.lognorm.cdf(x, self.emission[feature]['logsigma'][pstate_idx, :], 0,
                                                  np.exp(self.emission[feature]['logmu'][pstate_idx, :]))
            elif self.emission_name_distr[feature] == 'normal':
                cdf = lambda x: stats.norm.cdf(x, self.emission[feature]['mu'][pstate_idx, :],
                                               self.emission[feature]['sigma'][pstate_idx, :])
        elif pstate is None:
            # Marginalize pstate
            w = pstate_prob
            if self.emission_name_distr[feature] == 'lognormal':
                cdf = lambda x: stats.lognorm.cdf(x, self.emission[feature]['logsigma'][:, hstate], 0,
                                                  np.exp(self.emission[feature]['logmu'][:, hstate]))
            elif self.emission_name_distr[feature] == 'normal':
                cdf = lambda x: stats.norm.cdf(x, self.emission[feature]['mu'][:, hstate],
                                               self.emission[feature]['sigma'][:, hstate])
        else:
            # Both states fixed: a single component with unit weight
            w = 1
            if self.emission_name_distr[feature] == 'lognormal':
                cdf = lambda x: stats.lognorm.cdf(x, self.emission[feature]['logsigma'][pstate_idx, hstate], 0,
                                                  np.exp(self.emission[feature]['logmu'][pstate_idx, hstate]))
            elif self.emission_name_distr[feature] == 'normal':
                cdf = lambda x: stats.norm.cdf(x, self.emission[feature]['mu'][pstate_idx, hstate],
                                               self.emission[feature]['sigma'][pstate_idx, hstate])
        def fn(x):
            # Mixture cdf: weighted sum of the component cdfs at x
            if np.isscalar(x):
                p = np.sum(w * cdf(x))
            else:
                x = np.array(x)
                p = np.zeros(len(x))
                for i, xi in enumerate(x):
                    p[i] = np.sum(w * cdf(xi))
            return p
        return fn
    def params(self, pstates=None):
        """Return a flat array of model parameters for the given pstate labels.

        Includes each emission parameter for every (hidden state, pstate)
        pair, followed by the diagonal transition parameters. The leading
        None label selects the marginal (index-0) pstate.
        """
        if pstates is None:
            pstates = [None] + sorted(
                set(self.er.values()))  # TODO: self.e caches any unknown value, maybe it shouldn't?
        params = []
        # emission parameters
        for hstate, pstate_label in product(range(self.n_hidden_states), pstates):
            for feature, distr in zip(self.emission_name, self.emission_distr):
                for feature_param in _DISTRIBUTIONS[distr]:
                    params.append(self.emission[feature][feature_param][self.e[pstate_label], hstate])
        # transition parameters, diagonals only assuming 2 state
        for hstate, pstate_label in product(range(self.n_hidden_states), pstates):
            params.append(self.transmat[self.e[pstate_label], self.e[pstate_label], hstate, hstate])
        return np.array(params)
|
vmonaco/pohmm | pohmm/pohmm.py | Pohmm.fit_df | python | def fit_df(self, dfs, pstate_col=PSTATE_COL):
obs_cols = list(self.emission_name)
obs = [df[df.columns.difference([pstate_col])][obs_cols].values for df in dfs]
pstates = [df[pstate_col].values for df in dfs]
return self.fit(obs, pstates) | Convenience function to fit a model from a list of dataframes | train | https://github.com/vmonaco/pohmm/blob/c00f8a62d3005a171d424549a55d46c421859ae9/pohmm/pohmm.py#L876-L883 | [
"def fit(self, obs, pstates, unique_pstates=None):\n \"\"\"\n Estimate model parameters.\n \"\"\"\n obs = [np.array(o) for o in obs]\n pstates = [np.array(p) for p in pstates]\n\n # List or array of observation sequences\n assert len(obs) == len(pstates)\n assert obs[0].ndim == 2\n assert pstates[0].ndim == 1\n\n if unique_pstates is not None:\n self._init_pstates(unique_pstates)\n else:\n self._init_pstates(list(set(np.concatenate(pstates))))\n\n # Map the partial states to a unique index\n pstates_idx = [np.array([self.e[p] for p in seq]) for seq in pstates]\n\n if self.init_method == 'rand':\n self._init_random()\n elif self.init_method == 'obs':\n self._init_from_obs(obs, pstates_idx)\n\n self._init_pstate_freqs(pstates_idx)\n self._smooth()\n\n logprob = []\n for i in range(self.max_iter):\n # Expectation step\n stats = self._initialize_sufficient_statistics()\n curr_logprob = 0\n for obs_i, pstates_idx_i in zip(obs, pstates_idx):\n framelogprob = self._compute_log_likelihood(obs_i, pstates_idx_i)\n lpr, fwdlattice = self._do_forward_pass(framelogprob, pstates_idx_i)\n bwdlattice = self._do_backward_pass(framelogprob, pstates_idx_i)\n gamma = fwdlattice + bwdlattice\n posteriors = np.exp(gamma.T - logsumexp(gamma, axis=1)).T\n curr_logprob += lpr\n\n self._accumulate_sufficient_statistics(stats, obs_i,\n pstates_idx_i, framelogprob, posteriors, fwdlattice, bwdlattice)\n\n logprob.append(curr_logprob)\n self.logprob_ = curr_logprob\n\n # Check for convergence.\n self.n_iter_performed_ = i\n if i > 0:\n self.logprob_delta = logprob[-1] - logprob[-2]\n if self.logprob_delta < self.thresh:\n break\n\n # Maximization step\n self._do_mstep(stats)\n\n # Mix the parameters\n self._smooth()\n\n return self\n"
] | class Pohmm(object):
"""
Partially observable hidden Markov model
"""
    def __init__(self,
                 n_hidden_states=2,
                 emissions=['normal'],
                 max_iter=1000,
                 thresh=1e-6,
                 init_method='obs',
                 init_spread=2,
                 smoothing=None,
                 random_state=None):
        """Configure a new partially observable HMM.

        Parameters
        ----------
        n_hidden_states : int
            Number of hidden states per partial state.
        emissions : list of str or list of (name, distr) tuples
            Emission features; each distribution must be a key of _DISTRIBUTIONS.
        max_iter : int
            Maximum number of EM iterations performed by fit().
        thresh : float
            Log-likelihood improvement threshold used for convergence.
        init_method : str
            Parameter initialization method; must be in _INIT_METHODS.
        init_spread : float
            Spread of the initial hidden-state means (used by _init_from_obs).
        smoothing : None, str, or dict
            Smoothing strategy, either global (str) or per parameter (dict).
        random_state : int, RandomState, or None
            Source of randomness for random initialization/sampling.
        """
        if type(n_hidden_states) is int:
            self.n_hidden_states = n_hidden_states
        else:
            raise Exception('Wrong type for n_hidden_states. Must be int')
        # emissions may be given as names only (default distr order) or as
        # (name, distribution) pairs
        if type(emissions[0]) is tuple:
            emission_name, emission_distr = zip(*emissions)
        elif type(emissions[0]) is str:
            emission_name, emission_distr = np.arange(len(emissions)), emissions
        for distr in emission_distr:
            if distr not in _DISTRIBUTIONS.keys():
                raise ValueError('Emission distribution must be one of', _DISTRIBUTIONS.keys())
        self.emission_name = emission_name
        self.emission_distr = emission_distr
        self.emission_name_distr = dict(zip(emission_name, emission_distr))
        self.n_features = len(emissions)
        # Set up the emission parameters
        # emission: {'feature':{'param': np.array(shape=(n_partial_states, n_hidden_states))}}
        self.emission = defaultdict(dict)
        for name, distr in zip(self.emission_name, self.emission_distr):
            for param in _DISTRIBUTIONS[distr]:
                self.emission[name][param] = None
        self.emission = dict(self.emission)
        assert max_iter >= 0
        assert thresh >= 0
        self.max_iter = max_iter
        self.thresh = thresh
        assert init_spread >= 0
        self.init_spread = init_spread
        if init_method not in _INIT_METHODS:
            raise ValueError('init_method must be one of', _INIT_METHODS)
        self.init_method = init_method
        # Normalize the smoothing spec into a dict keyed by 'transmat',
        # 'startprob', and 'name:param' for every emission parameter.
        if smoothing is None:
            smoothing = {'transmat': None, 'startprob': None}
            for name, distr in zip(self.emission_name, self.emission_distr):
                for param in _DISTRIBUTIONS[distr]:
                    smoothing['%s:%s' % (name, param)] = None
        elif type(smoothing) is str:
            s = smoothing
            smoothing = {'transmat': s, 'startprob': s}
            for name, distr in zip(self.emission_name, self.emission_distr):
                for param in _DISTRIBUTIONS[distr]:
                    smoothing['%s:%s' % (name, param)] = s
        elif type(smoothing) is dict:
            assert 'transmat' in smoothing.keys()
            assert 'startprob' in smoothing.keys()
            for name, distr in zip(self.emission_name, self.emission_distr):
                for param in _DISTRIBUTIONS[distr]:
                    assert param in smoothing.keys() or '%s:%s' % (name, param) in smoothing.keys()
                    # Expand a bare param key into the per-feature key
                    if param in smoothing.keys() and '%s:%s' % (name, param) not in smoothing.keys():
                        smoothing['%s:%s' % (name, param)] = smoothing[param]
        else:
            raise Exception('Wrong type for smoothing. Must be None, str, or dict')
        self.smoothing = smoothing
        self.random_state = random_state
        # Number of unique partial states is unknown until fit
        self.n_partial_states = None
        # Results after fitting the model
        self.logprob_ = None
        self.n_iter_performed_ = None
        self.logprob_delta_ = None
        # Mapping between p-states and a unique index
        # Defaults to 0 for unknown or missing p-states
        self.e = defaultdict(int)
def _get_startprob(self):
return np.exp(self._log_startprob)
def _set_startprob(self, startprob):
if startprob is None:
startprob = np.ones(shape=(self.n_partial_states, self.n_hidden_states)) / self.n_hidden_states
else:
startprob = np.asarray(startprob, dtype=np.float)
# check if there exists a component whose value is exactly zero
# if so, add a small number and re-normalize
if not np.alltrue(startprob):
startprob = normalize(startprob, axis=1)
if len(startprob) != self.n_partial_states:
raise ValueError('startprob must have length n_partial_states')
if not np.allclose(np.sum(startprob, axis=1), 1.0):
raise ValueError('startprob must sum to 1.0')
self._log_startprob = np.log(np.asarray(startprob).copy())
startprob = property(_get_startprob, _set_startprob)
def _get_steadyprob(self):
return np.exp(self._log_steadyprob)
def _set_steadyprob(self, steadyprob):
if steadyprob is None:
steadyprob = np.ones(shape=(self.n_partial_states, self.n_hidden_states)) / self.n_hidden_states
else:
steadyprob = np.asarray(steadyprob, dtype=np.float)
# check if there exists a component whose value is exactly zero
# if so, add a small number and re-normalize
if not np.alltrue(steadyprob):
steadyprob = normalize(steadyprob, axis=1)
if len(steadyprob) != self.n_partial_states:
raise ValueError('steadyprob must have length n_partial_states')
if not np.allclose(np.sum(steadyprob, axis=1), 1.0):
raise ValueError('steadyprob must sum to 1.0')
self._log_steadyprob = np.log(np.asarray(steadyprob).copy())
steadyprob = property(_get_steadyprob, _set_steadyprob)
def _get_transmat(self):
return np.exp(self._log_transmat)
def _set_transmat(self, transmat):
if transmat is None:
transmat = np.ones(shape=(self.n_partial_states, self.n_partial_states, self.n_hidden_states,
self.n_hidden_states)) / self.n_hidden_states
# check if there exists a component whose value is exactly zero
# if so, add a small number and re-normalize
if not np.alltrue(transmat):
transmat = normalize(transmat, axis=3)
if (np.asarray(transmat).shape
!= (self.n_partial_states, self.n_partial_states, self.n_hidden_states, self.n_hidden_states)):
raise ValueError('transmat must have shape '
'(n_partial_states,n_partial_states,n_hidden_states,n_hidden_states)')
if not np.all(np.allclose(np.sum(transmat, axis=3), 1.0)):
raise ValueError('Rows of transmat must sum to 1.0')
self._log_transmat = np.log(np.asarray(transmat).copy())
underflow_idx = np.isnan(self._log_transmat)
self._log_transmat[underflow_idx] = NEGINF
transmat = property(_get_transmat, _set_transmat)
    def _compute_log_likelihood(self, obs, pstates_idx):
        """Per-sample emission log-likelihoods, shape (len(obs), n_hidden_states).

        Feature log-densities are summed over columns (features treated as
        conditionally independent given the hidden state); each density is
        floored at MIN_PROBA to avoid log(0).
        """
        q = np.zeros(shape=(len(obs), self.n_hidden_states, self.n_features))
        for col, (feature_name, feature_distr) in enumerate(zip(self.emission_name, self.emission_distr)):
            if feature_distr == 'normal':
                mu = self.emission[feature_name]['mu'][pstates_idx]
                sigma = self.emission[feature_name]['sigma'][pstates_idx]
                for j in range(self.n_hidden_states):
                    q[:, j, col] = np.log(
                        np.maximum(MIN_PROBA, stats.norm.pdf(obs[:, col], loc=mu[:, j], scale=sigma[:, j])))
            if feature_distr == 'lognormal':
                logmu = self.emission[feature_name]['logmu'][pstates_idx]
                logsigma = self.emission[feature_name]['logsigma'][pstates_idx]
                for j in range(self.n_hidden_states):
                    q[:, j, col] = np.log(np.maximum(MIN_PROBA,
                                                     stats.lognorm.pdf(obs[:, col], logsigma[:, j], loc=0,
                                                                       scale=np.exp(logmu[:, j]))))
        # Sum feature log-densities -> joint log-likelihood per hidden state
        q = q.sum(axis=2)
        return q
    def _generate_sample_from_state(self, hidden_state, pstates_idx, random_state=None):
        """Draw one observation vector (length n_features) from the emission
        distribution of the (pstates_idx, hidden_state) pair."""
        sample = np.zeros(self.n_features)
        for col, (feature_name, feature_distr) in enumerate(zip(self.emission_name, self.emission_distr)):
            if feature_distr == 'normal':
                mu = self.emission[feature_name]['mu'][pstates_idx][hidden_state]
                sigma = self.emission[feature_name]['sigma'][pstates_idx][hidden_state]
                sample[col] = stats.norm.rvs(loc=mu, scale=sigma, random_state=random_state)
            if feature_distr == 'lognormal':
                logmu = self.emission[feature_name]['logmu'][pstates_idx][hidden_state]
                logsigma = self.emission[feature_name]['logsigma'][pstates_idx][hidden_state]
                sample[col] = stats.lognorm.rvs(logsigma, loc=0, scale=np.exp(logmu), random_state=random_state)
        return sample
def _init_pstates(self, unique_pstates):
# Map events to a unique index. The unknown p-state is at idx 0
self.e = defaultdict(int)
self.e.update(dict(zip(np.sort(unique_pstates), range(1, len(unique_pstates) + 1))))
self.er = {v: k for k, v in self.e.items()} # Reverse lookup
self.er[0] = UNKNOWN_PSTATE
self.n_partial_states = len(self.e.keys()) + 1 # Add one for the unknown state
return
    def _init_pstate_freqs(self, pstates_idx):
        """Accumulate pstate frequencies and start/transition/steady probabilities
        from the index-mapped pstate sequences."""
        # Partial state frequencies
        self.pstate_freq = Counter([idx for seq in pstates_idx for idx in seq])
        self.pstate_trans_freq = Counter([(idx1, idx2) for seq in pstates_idx for idx1, idx2 in zip(seq[:-1], seq[1:])])
        # Store freqs for the meta state
        self.pstate_freq[0] = len(np.concatenate(pstates_idx))
        for seq in pstates_idx:
            self.pstate_startprob[seq[0]] += 1
            self.pstate_steadyprob += np.bincount(seq, minlength=self.n_partial_states)
            for idx1, idx2 in zip(seq[:-1], seq[1:]):
                # Also count transitions through the meta (index-0) state
                self.pstate_trans_freq[(0, 0)] += 1
                self.pstate_trans_freq[(idx1, 0)] += 1
                self.pstate_trans_freq[(0, idx2)] += 1
                self.pstate_transmat[idx1, idx2] += 1
        # TODO: separate probas from freqs
        # Normalize to get the probabilities, ignore the meta state at idx 0
        self.pstate_startprob[1:] = normalize(self.pstate_startprob[1:])
        self.pstate_transmat[1:, 1:] = normalize(self.pstate_transmat[1:, 1:], axis=1)
        self.pstate_steadyprob[1:] = normalize(self.pstate_steadyprob[1:])
        return
    def _init_from_obs(self, obs, pstates_idx):
        """Initialize all model parameters from the observations themselves.

        Transition/start probabilities are set uniform; emission means are
        centered on the per-pstate sample statistics, with the first feature's
        means spread by +/- init_spread standard deviations to break hidden
        state symmetry.
        """
        # Partial state probabilities
        self.pstate_startprob = np.zeros(self.n_partial_states)
        self.pstate_transmat = np.zeros((self.n_partial_states, self.n_partial_states))
        self.pstate_steadyprob = np.zeros(self.n_partial_states)
        # obs should be (N*T, n_features)
        # N is the number of samples
        # T is the size of each sample
        obs = np.concatenate(obs)
        pstates_idx = np.concatenate(pstates_idx)
        # Initialize starting and transition probas
        self.startprob = np.ones(shape=(self.n_partial_states, self.n_hidden_states)) / self.n_hidden_states
        self.steadyprob = np.ones(shape=(self.n_partial_states, self.n_hidden_states)) / self.n_hidden_states
        self.transmat = np.ones(shape=(self.n_partial_states, self.n_partial_states, self.n_hidden_states,
                                       self.n_hidden_states)) / self.n_hidden_states
        # Initialize emission parameters
        # Hidden states are ordered by the first feature
        feature1 = self.emission_name[0]
        for col, (feature_name, feature_distr) in enumerate(zip(self.emission_name, self.emission_distr)):
            if feature_distr == 'normal':
                self.emission[feature_name]['mu'] = np.zeros(shape=(self.n_partial_states, self.n_hidden_states))
                self.emission[feature_name]['sigma'] = np.zeros(shape=(self.n_partial_states, self.n_hidden_states))
                for idx in range(1, self.n_partial_states):
                    idx_pstate = (pstates_idx == idx)
                    # Fall back to all samples if this pstate never occurs
                    if not np.any(idx_pstate):
                        idx_pstate = np.arange(len(pstates_idx))
                    if feature_name == feature1:
                        self.emission[feature_name]['sigma'][idx, :] = np.maximum(obs[idx_pstate, col].std(), MIN_PROBA)
                        self.emission[feature_name]['mu'][idx] = obs[idx_pstate, col].mean() + \
                                                                 obs[:, col].std() * np.linspace(
                            -self.init_spread, self.init_spread,
                            self.n_hidden_states)
                    else:
                        self.emission[feature_name]['sigma'][idx, :] = np.maximum(obs[idx_pstate, col].std(), MIN_PROBA)
                        self.emission[feature_name]['mu'][idx, :] = obs[idx_pstate, col].mean()
            if feature_distr == 'lognormal':
                self.emission[feature_name]['logmu'] = np.zeros(shape=(self.n_partial_states, self.n_hidden_states))
                self.emission[feature_name]['logsigma'] = np.zeros(shape=(self.n_partial_states, self.n_hidden_states))
                for idx in range(1, self.n_partial_states):
                    idx_pstate = (pstates_idx == idx)
                    # Fall back to all samples if this pstate never occurs
                    if not np.any(idx_pstate):
                        idx_pstate = np.arange(len(pstates_idx))
                    if feature_name == feature1:
                        self.emission[feature_name]['logsigma'][idx, :] = np.maximum(np.log(obs[idx_pstate, col]).std(),
                                                                                     MIN_PROBA)
                        self.emission[feature_name]['logmu'][idx] = np.log(obs[idx_pstate, col]).mean() + np.log(
                            obs[:, col]).std() * np.linspace(-self.init_spread, self.init_spread,
                                                             self.n_hidden_states)
                    else:
                        self.emission[feature_name]['logsigma'][idx, :] = np.maximum(np.log(obs[idx_pstate, col]).std(),
                                                                                     MIN_PROBA)
                        self.emission[feature_name]['logmu'][idx] = np.log(obs[idx_pstate, col]).mean()
        return
    def _init_random(self, random_state=None):
        """Randomly initialize all model parameters.

        Stochastic matrices are drawn with gen_stochastic_matrix; emission
        parameters are drawn uniformly from the ranges in _RANDINIT. The
        first feature's means are sorted so hidden states are ordered.
        """
        if random_state is None:
            random_state = self.random_state
        random_state = check_random_state(random_state)
        self.pstate_startprob = np.zeros(self.n_partial_states)
        self.pstate_transmat = np.zeros((self.n_partial_states, self.n_partial_states))
        self.pstate_steadyprob = np.zeros(self.n_partial_states)
        # Index 0 is the meta state; only real pstates get random probas
        self.pstate_startprob[1:] = gen_stochastic_matrix(size=self.n_partial_states - 1, random_state=random_state)
        self.pstate_transmat[1:, 1:] = gen_stochastic_matrix(
            size=(self.n_partial_states - 1, self.n_partial_states - 1), random_state=random_state)
        self.pstate_steadyprob[1:] = steadystate(self.pstate_transmat[1:, 1:])
        self.startprob = gen_stochastic_matrix(size=(self.n_partial_states, self.n_hidden_states),
                                               random_state=random_state)
        transmat = np.zeros((self.n_partial_states, self.n_partial_states, self.n_hidden_states, self.n_hidden_states))
        for i, j in product(range(1, self.n_partial_states), range(1, self.n_partial_states)):
            transmat[i, j] = gen_stochastic_matrix(size=(self.n_hidden_states, self.n_hidden_states),
                                                   random_state=random_state)
        self.transmat = normalize(transmat, axis=3)
        # Initialize emission parameters
        for feature_name, feature_distr in zip(self.emission_name, self.emission_distr):
            if feature_distr == 'normal':
                self.emission[feature_name]['mu'] = random_state.uniform(*_RANDINIT[feature_distr]['mu'], size=(
                    self.n_partial_states, self.n_hidden_states))
                self.emission[feature_name]['sigma'] = random_state.uniform(*_RANDINIT[feature_distr]['sigma'], size=(
                    self.n_partial_states, self.n_hidden_states))
            if feature_distr == 'lognormal':
                self.emission[feature_name]['logmu'] = random_state.uniform(*_RANDINIT[feature_distr]['logmu'], size=(
                    self.n_partial_states, self.n_hidden_states))
                self.emission[feature_name]['logsigma'] = random_state.uniform(*_RANDINIT[feature_distr]['logsigma'],
                                                                               size=(self.n_partial_states,
                                                                                     self.n_hidden_states))
        # Order the hidden states by the first feature's mean
        if self.emission_distr[0] == 'normal':
            self.emission[self.emission_name[0]]['mu'] = np.sort(self.emission[self.emission_name[0]]['mu'], axis=1)
        elif self.emission_distr[0] == 'lognormal':
            self.emission[self.emission_name[0]]['logmu'] = np.sort(self.emission[self.emission_name[0]]['logmu'],
                                                                    axis=1)
        return
    def _smooth(self):
        """Mix each pstate's parameters with the marginal (index-0) parameters.

        The mixing weights depend on the configured smoothing strategy:
        'freq' (frequency-based), 'proba' (probability-based), 'exp'
        (exponential decay in frequency), 'fixed' (use the marginal only),
        or None (no smoothing). Applies to start probabilities, transition
        probabilities, and every emission parameter.
        """
        self._compute_marginals()
        startprob = self.startprob
        for j in range(1, self.n_partial_states):
            # w_j weights the pstate-specific value, w_ weights the marginal
            if 'freq' == self.smoothing['startprob']:
                w_ = 1 / (1 + self.pstate_freq[j])
                w_j = 1 - w_
            elif 'proba' == self.smoothing['startprob']:
                w_j = self.pstate_steadyprob[j]
                w_ = 1 - w_j
            elif 'exp' == self.smoothing['startprob']:
                w_ = np.exp(-self.pstate_freq[j])
                w_j = 1 - w_
            elif 'fixed' == self.smoothing['startprob']:
                w_ = 1
                w_j = 0
            elif self.smoothing['startprob'] is None:
                w_ = 0
                w_j = 1
            else:
                raise Exception('Wrong smoothing for startprob: ' + self.smoothing['startprob'])
            startprob[j] = w_j * self.startprob[j] + w_ * self.startprob[0]
        self.startprob = startprob
        transmat = self.transmat
        for i, j in product(range(1, self.n_partial_states), range(1, self.n_partial_states)):
            # Four-way mix: (i,j), (i,0), (0,j) and (0,0) transition blocks
            if 'freq' == self.smoothing['transmat']:
                w_i0 = 1 / (1 + self.pstate_trans_freq[i, j] + self.pstate_trans_freq[0, j])
                w_0j = 1 / (1 + self.pstate_trans_freq[i, j] + self.pstate_trans_freq[i, 0])
                w_ij = 1 - (w_i0 + w_0j)
                w_ = 0
            elif 'proba' == self.smoothing['transmat']:
                denom = self.pstate_transmat[i, j] + self.pstate_transmat[i, :].sum() + self.pstate_transmat[:, j].sum()
                w_i0 = self.pstate_transmat[i, :].sum() / denom
                w_0j = self.pstate_transmat[:, j].sum() / denom
                w_ij = self.pstate_transmat[i, j] / denom
                w_ = 0
            elif 'exp' == self.smoothing['transmat']:
                w_i0 = self.pstate_trans_freq[i, 0] * np.exp(-self.pstate_trans_freq[i, j])
                w_0j = self.pstate_trans_freq[0, j] * np.exp(-self.pstate_trans_freq[i, j])
                w_ = self.pstate_trans_freq[0, 0] * np.exp(
                    -(self.pstate_trans_freq[i, 0] + self.pstate_trans_freq[0, j]))
                w_ij = self.pstate_trans_freq[i, j]
                w_ij, w_i0, w_0j, w_ = normalize(np.array([w_ij, w_i0, w_0j, w_]))
            elif 'fixed' == self.smoothing['transmat']:
                w_i0 = 0
                w_0j = 0
                w_ = 1
                w_ij = 0
            elif self.smoothing['transmat'] is None:
                w_i0 = 0
                w_0j = 0
                w_ = 0
                w_ij = 1
            else:
                raise Exception('Wrong smoothing for transmat: ' + self.smoothing['transmat'])
            assert (w_i0 + w_0j + w_ + w_ij) - 1 < TOLERANCE
            transmat[i, j] = w_ij * self.transmat[i, j] + w_i0 * self.transmat[i, 0] + w_0j * self.transmat[0, j] + w_ * \
                             self.transmat[
                0, 0]
        self.transmat = transmat
        assert np.all(self.startprob.sum(axis=1) - 1 < TOLERANCE)
        assert np.all(self.steadyprob.sum(axis=1) - 1 < TOLERANCE)
        assert np.all(self.transmat.sum(axis=3) - 1 < TOLERANCE)
        # Smooth each emission parameter toward the marginal pstate's value
        for feature_name, feature_distr in zip(self.emission_name, self.emission_distr):
            for param in _DISTRIBUTIONS[feature_distr]:
                key = '%s:%s' % (feature_name, param)
                for j in range(1, self.n_partial_states):
                    if 'freq' == self.smoothing[key]:
                        w_ = 1 / (1 + self.pstate_freq[j])
                        w_j = 1 - w_
                    elif 'proba' == self.smoothing[key]:
                        w_j = self.pstate_steadyprob[j]
                        w_ = 1 - w_j
                    elif 'exp' == self.smoothing[key]:
                        w_ = np.exp(-self.pstate_freq[j])
                        w_j = 1 - w_
                    elif 'fixed' == self.smoothing[key]:
                        w_ = 1
                        w_j = 0
                    elif self.smoothing[key] is None:
                        w_ = 0
                        w_j = 1
                    else:
                        raise Exception('Wrong smoothing for ' + key)
                    self.emission[feature_name][param][j] = w_j * self.emission[feature_name][param][j] + w_ * \
                                                            self.emission[
                                                                feature_name][
                                                                param][0]
        return
def _initialize_sufficient_statistics(self):
stats = {
'nobs': 0,
'post': np.zeros((self.n_partial_states, self.n_hidden_states)),
'obs': np.zeros((self.n_partial_states, self.n_hidden_states, self.n_features)),
'obs**2': np.zeros((self.n_partial_states, self.n_hidden_states, self.n_features)),
'lnobs': np.zeros((self.n_partial_states, self.n_hidden_states, self.n_features)),
'lnobs**2': np.zeros((self.n_partial_states, self.n_hidden_states, self.n_features)),
'start': np.zeros((self.n_partial_states, self.n_hidden_states)),
'steady': np.zeros((self.n_partial_states, self.n_hidden_states)),
'trans': np.zeros(
(self.n_partial_states, self.n_partial_states, self.n_hidden_states, self.n_hidden_states))
}
return stats
    def _accumulate_sufficient_statistics(self, stats, obs, pstates_idx, framelogprob,
                                          posteriors, fwdlattice, bwdlattice):
        """Accumulate one sequence's E-step statistics into `stats`.

        Updates posterior mass, start counts, pairwise transition posteriors
        (via the C helper _hmmc._compute_lneta) and the weighted observation
        moments used by _do_mstep.
        """
        stats['nobs'] += 1
        n_observations, n_hidden_states = framelogprob.shape
        stats['start'][0] += posteriors[0]
        # Start statistic per pstate: posterior at its first occurrence
        for i in range(self.n_partial_states):
            if len(np.where(pstates_idx == i)[0]) > 0:
                stats['start'][i] += posteriors[np.where(pstates_idx == i)[0].min()]
        if n_observations > 1:
            lneta = np.zeros((n_observations - 1, n_hidden_states, n_hidden_states))
            _hmmc._compute_lneta(n_observations, n_hidden_states, pstates_idx, fwdlattice,
                                 self._log_transmat, bwdlattice, framelogprob,
                                 lneta)
            # Sum transition posteriors over every observed (i, j) pstate pair
            for i, j in unique_rows(np.c_[pstates_idx[:-1], pstates_idx[1:]]):
                if ((pstates_idx[:-1] == i) & (pstates_idx[1:] == j)).sum() > 0:
                    stats['trans'][i, j] += np.exp(
                        logsumexp(lneta[(pstates_idx[:-1] == i) & (pstates_idx[1:] == j)], axis=0))
        # Posterior-weighted observation moments (raw and log space)
        for i in range(self.n_partial_states):
            stats['post'][i] += posteriors[pstates_idx == i].sum(axis=0)
            stats['obs'][i] += np.dot(posteriors[pstates_idx == i].T, obs[pstates_idx == i])
            stats['obs**2'][i] += np.dot(posteriors[pstates_idx == i].T, obs[pstates_idx == i] ** 2)
            stats['lnobs'][i] += np.dot(posteriors[pstates_idx == i].T, np.log(obs[pstates_idx == i]))
            stats['lnobs**2'][i] += np.dot(posteriors[pstates_idx == i].T, np.log(obs[pstates_idx == i]) ** 2)
        return
def _compute_marginals(self):
# TODO: cythonize some of this
# Start prob, weighted by p-state start probs
self.startprob[0] = (self.pstate_startprob[1:, np.newaxis] * self.startprob[1:]).sum(axis=0)
# Use the p-state transmat and transmat to get the full transmat
full_transmat = ph2full(self.pstate_transmat[1:, 1:], self.transmat[1:, 1:])
full_steadyprob = steadystate(full_transmat)
# Steady state probas are determined by the full trans mat, need to be updated
steadyprob = np.zeros(shape=(self.n_partial_states, self.n_hidden_states))
steadyprob[0] = full_steadyprob.reshape(-1, self.n_hidden_states).sum(axis=0)
for i in range(self.n_partial_states - 1):
steadyprob[i + 1] = normalize(
full_steadyprob[i * self.n_hidden_states:i * self.n_hidden_states + self.n_hidden_states])
self.steadyprob = steadyprob
# Update the transations to/from the marginal state
transmat = self.transmat
# Group the hidden states within each partial state
for hidx1, hidx2 in product(range(self.n_hidden_states), range(self.n_hidden_states)):
transmat[0, 0][hidx1, hidx2] = full_transmat[hidx1::self.n_hidden_states, hidx2::self.n_hidden_states].sum()
for pidx in range(self.n_partial_states - 1):
transmat[pidx + 1, 0][hidx1, hidx2] = full_transmat[pidx * self.n_hidden_states + hidx1,
hidx2::self.n_hidden_states].sum()
transmat[0, pidx + 1][hidx1, hidx2] = full_transmat[hidx1::self.n_hidden_states,
pidx * self.n_hidden_states + hidx2].sum()
self.transmat = normalize(transmat, axis=3)
pweights = self.pstate_steadyprob[1:, np.newaxis]
# Update emission parameters
for feature_name, feature_distr in zip(self.emission_name, self.emission_distr):
if feature_distr == 'normal':
# Marginal state is a mixture of normals
mu = self.emission[feature_name]['mu'][1:, :]
sigma = self.emission[feature_name]['sigma'][1:, :]
# Weighted mean and var
mu_0 = (pweights * mu).sum(axis=0)
self.emission[feature_name]['mu'][0, :] = mu_0
self.emission[feature_name]['sigma'][0, :] = np.sqrt(
(pweights * ((mu - mu_0) ** 2 + sigma ** 2)).sum(axis=0))
if feature_distr == 'lognormal':
# Marginal state is a mixture of normals
mu = self.emission[feature_name]['logmu'][1:, :]
sigma = self.emission[feature_name]['logsigma'][1:, :]
# Weighted mean and var
mu_0 = (pweights * mu).sum(axis=0)
self.emission[feature_name]['logmu'][0, :] = mu_0
self.emission[feature_name]['logsigma'][0, :] = np.sqrt(
(pweights * ((mu - mu_0) ** 2 + sigma ** 2)).sum(axis=0))
return
    def _do_mstep(self, stats):
        """M-step: re-estimate start/transition probabilities and emission
        parameters from the accumulated sufficient statistics.

        All quantities are floored at MIN_PROBA to keep them strictly
        positive; NaN sigmas (from empty posteriors) are also floored.
        """
        self.startprob = normalize(np.maximum(stats['start'], MIN_PROBA), axis=1)
        self.transmat = normalize(np.maximum(stats['trans'], MIN_PROBA), axis=3)
        for col, (feature_name, feature_distr) in enumerate(zip(self.emission_name, self.emission_distr)):
            if feature_distr == 'normal':
                denom = np.maximum(stats['post'], MIN_PROBA)
                mu = stats['obs'][:, :, col] / denom
                # Posterior-weighted variance numerator: E[x^2] - 2*mu*E[x] + mu^2
                cv_num = (stats['obs**2'][:, :, col]
                          - 2 * mu * stats['obs'][:, :, col]
                          + mu ** 2 * denom)
                sigma = np.sqrt(cv_num / denom)
                sigma[np.isnan(sigma)] = MIN_PROBA
                self.emission[feature_name]['mu'] = mu
                self.emission[feature_name]['sigma'] = np.maximum(sigma, MIN_PROBA)
            if feature_distr == 'lognormal':
                denom = np.maximum(stats['post'], MIN_PROBA)
                mu = stats['lnobs'][:, :, col] / denom
                # Same estimator in log space for the lognormal parameters
                cv_num = (stats['lnobs**2'][:, :, col]
                          - 2 * mu * stats['lnobs'][:, :, col]
                          + mu ** 2 * denom)
                sigma = np.sqrt(cv_num / denom)
                sigma[np.isnan(sigma)] = MIN_PROBA
                self.emission[feature_name]['logmu'] = mu
                self.emission[feature_name]['logsigma'] = np.maximum(sigma, MIN_PROBA)
        return
    def _do_forward_pass(self, framelogprob, event_idx):
        """Forward algorithm (C helper); returns (sequence log-likelihood, forward lattice)."""
        n_observations, n_hidden_states = framelogprob.shape
        fwdlattice = np.zeros((n_observations, n_hidden_states))
        _hmmc._forward(n_observations, n_hidden_states,
                       event_idx, self._log_startprob,
                       self._log_transmat, framelogprob, fwdlattice)
        return logsumexp(fwdlattice[-1]), fwdlattice
    def _do_backward_pass(self, framelogprob, event_idx):
        """Backward algorithm (C helper); returns the backward lattice."""
        n_observations, n_hidden_states = framelogprob.shape
        bwdlattice = np.zeros((n_observations, n_hidden_states))
        _hmmc._backward(n_observations, n_hidden_states,
                        event_idx, self._log_startprob,
                        self._log_transmat, framelogprob, bwdlattice)
        return bwdlattice
    def _do_viterbi_pass(self, framelogprob, event_idx):
        """Viterbi decoding (C helper); returns (logprob, most likely hidden state sequence)."""
        n_observations, n_components = framelogprob.shape
        state_sequence, logprob = _hmmc._viterbi(
            n_observations, n_components,
            event_idx, self._log_startprob,
            self._log_transmat, framelogprob)
        return logprob, state_sequence
    def rand(self, unique_pstates, random_state=None):
        """
        Randomize the POHMM parameters.

        Initializes the pstate index maps for `unique_pstates`, draws random
        parameters, and recomputes the marginal (index-0) parameters.
        Returns self for chaining.
        """
        self._init_pstates(unique_pstates)
        self._init_random(random_state=random_state)
        self._compute_marginals()
        return self
def fit(self, obs, pstates, unique_pstates=None):
    """
    Estimate model parameters by Baum-Welch (EM).

    Parameters
    ----------
    obs : list of array-like, each of shape (T_i, n_features)
        Observation sequences.
    pstates : list of array-like, each of length T_i
        Partial-state label sequences aligned with ``obs``.
    unique_pstates : iterable, optional
        Complete set of partial-state labels; when omitted, the labels seen
        in ``pstates`` are used.

    Returns
    -------
    self
        Fitted attributes: ``logprob_``, ``n_iter_performed_``,
        ``logprob_delta_``.
    """
    obs = [np.array(o) for o in obs]
    pstates = [np.array(p) for p in pstates]

    # List or array of observation sequences
    assert len(obs) == len(pstates)
    assert obs[0].ndim == 2
    assert pstates[0].ndim == 1

    if unique_pstates is not None:
        self._init_pstates(unique_pstates)
    else:
        self._init_pstates(list(set(np.concatenate(pstates))))

    # Map the partial states to a unique index
    pstates_idx = [np.array([self.e[p] for p in seq]) for seq in pstates]

    if self.init_method == 'rand':
        self._init_random()
    elif self.init_method == 'obs':
        self._init_from_obs(obs, pstates_idx)

    self._init_pstate_freqs(pstates_idx)
    self._smooth()

    logprob = []
    for i in range(self.max_iter):
        # Expectation step: accumulate sufficient statistics over all sequences
        stats = self._initialize_sufficient_statistics()
        curr_logprob = 0
        for obs_i, pstates_idx_i in zip(obs, pstates_idx):
            framelogprob = self._compute_log_likelihood(obs_i, pstates_idx_i)
            lpr, fwdlattice = self._do_forward_pass(framelogprob, pstates_idx_i)
            bwdlattice = self._do_backward_pass(framelogprob, pstates_idx_i)
            gamma = fwdlattice + bwdlattice
            # Normalize in log space to obtain per-frame state posteriors
            posteriors = np.exp(gamma.T - logsumexp(gamma, axis=1)).T
            curr_logprob += lpr
            self._accumulate_sufficient_statistics(
                stats, obs_i, pstates_idx_i, framelogprob, posteriors,
                fwdlattice, bwdlattice)
        logprob.append(curr_logprob)
        self.logprob_ = curr_logprob

        # Check for convergence.
        self.n_iter_performed_ = i
        if i > 0:
            # BUGFIX: store the improvement under the fitted attribute
            # ``logprob_delta_`` declared in __init__ (it was previously
            # assigned to ``logprob_delta`` and left at None); the old name
            # is kept for backward compatibility.
            self.logprob_delta_ = self.logprob_delta = logprob[-1] - logprob[-2]
            if self.logprob_delta_ < self.thresh:
                break

        # Maximization step
        self._do_mstep(stats)

        # Mix the parameters
        self._smooth()

    return self
def score(self, obs, pstates):
    """Compute the total log probability of a sequence under the model."""
    idx = np.array([self.e[label] for label in pstates])
    loglik = self._compute_log_likelihood(obs, idx)
    total, _lattice = self._do_forward_pass(loglik, idx)
    return total
def score_events(self, obs, pstates):
    """Compute the log probability of each individual event under the model."""
    idx = np.array([self.e[label] for label in pstates])
    loglik = self._compute_log_likelihood(obs, idx)
    _, lattice = self._do_forward_pass(loglik, idx)
    # Cumulative sequence log-likelihood after each event ...
    cumulative = logsumexp(lattice, axis=1)
    # ... differenced to recover each event's individual contribution.
    return np.concatenate([cumulative[[0]], np.diff(cumulative)])
def predict_states(self, obs, pstates):
    """Infer the most likely hidden-state sequence for the given observations.

    Returns the Viterbi path log-probability and the state sequence.
    """
    idx = np.array([self.e[label] for label in pstates])
    loglik = self._compute_log_likelihood(obs, idx)
    return self._do_viterbi_pass(loglik, idx)
def predict(self, obs, pstates, next_pstate=None):
    """
    Predict the next observation

    Given a history of observations and their partial states, propagate the
    final forward variables one step through the transition matrix of the
    (known) next partial state, then return the expected value of each
    emission feature under the resulting hidden-state distribution.
    """
    assert len(obs) == len(pstates)

    pstates_idx = np.array([self.e[ei] for ei in pstates])
    next_pstate_idx = self.e[next_pstate]

    if len(obs) == 0:
        # No history, use the starting probas
        next_hstate_prob = self.startprob[next_pstate_idx]
    else:
        # With a history, determine the hidden state posteriors using
        # the last posteriors and transition matrix
        framelogprob = self._compute_log_likelihood(obs, pstates_idx)
        _, fwdlattice = self._do_forward_pass(framelogprob, pstates_idx)

        next_hstate_prob = np.zeros(self.n_hidden_states)
        alpha_n = fwdlattice[-1]
        # Exponentiate with a max-shift for numerical stability, then normalize
        vmax = alpha_n.max(axis=0)
        alpha_n = np.exp(alpha_n - vmax)
        alpha_n = alpha_n / alpha_n.sum()
        trans = self.transmat[pstates_idx[-1], next_pstate_idx]
        # One-step propagation: p(h'=i) = sum_j alpha[j] * trans[j, i]
        for i in range(self.n_hidden_states):
            next_hstate_prob[i] = np.sum([alpha_n[j] * trans[j, i] for j in range(self.n_hidden_states)])

    # Sanity check that the propagated distribution is (nearly) normalized
    assert next_hstate_prob.sum() - 1 < TOLERANCE

    # Make the prediction
    prediction = np.array(
        [self.expected_value(feature, pstate=next_pstate, hstate_prob=next_hstate_prob) for feature in
         self.emission_name])

    # next_hstate = np.argmax(next_hstate_prob)
    # prediction = np.array(
    #     [self.expected_value(feature, pstate=next_pstate, hstate=next_hstate) for feature in
    #      self.emission_name])

    return prediction
def gen_pstates_idx(self, n, random_state=None):
    """Sample a chain of partial-state indexes from the fitted
    partial-state Markov chain.

    Falls back to ``self.random_state`` when no random state is given.
    """
    if random_state is None:
        random_state = self.random_state
    rng = check_random_state(random_state)

    start_cdf = np.cumsum(self.pstate_startprob)
    trans_cdf = np.cumsum(self.pstate_transmat, 1)

    # Draw the initial index by inverse-CDF sampling, then walk the chain.
    state = (start_cdf > rng.rand()).argmax()
    chain = [state]
    while len(chain) < n:
        state = (trans_cdf[state] > rng.rand()).argmax()
        chain.append(state)
    return np.array(chain, dtype=int)
def sample(self, pstates=None, n_obs=None, random_state=None):
    """
    Generate observations from the model.

    Either a fixed partial-state sequence (``pstates``) or a number of
    observations to generate (``n_obs``) must be given, but not both.
    When ``pstates`` is omitted, the partial-state sequence is itself
    sampled from the fitted partial-state Markov chain.

    Returns
    -------
    (obs, pstates, hidden_states) as numpy arrays.
    """
    random_state = check_random_state(random_state)

    if pstates is None and n_obs is None:
        raise Exception('Must provide either pstates or n_obs')

    if pstates is not None and n_obs is not None:
        raise Exception('Must provide either pstates or n_obs but not both')

    gen_pstates = False
    # NOTE(review): this draw is consumed only in the pstates-is-None branch;
    # when pstates is given it is unused but kept for RNG reproducibility.
    rand = random_state.rand()
    if pstates is None:
        gen_pstates = True
        pstartprob_cdf = np.cumsum(self.pstate_startprob)
        ptransmat_cdf = np.cumsum(self.pstate_transmat, 1)

        # Initial pstate (inverse-CDF draw)
        currpstate = (pstartprob_cdf > rand).argmax()
        pstates_idx = [currpstate]
        pstates = [self.er[currpstate]]
    else:
        n_obs = len(pstates)
        pstates_idx = np.array([self.e[p] for p in pstates])

    startprob_pdf = self.startprob[pstates_idx[0]]
    startprob_cdf = np.cumsum(startprob_pdf)
    # NOTE(review): this transmat_cdf is overwritten inside the loop before
    # it is ever indexed -- appears to be dead initialization; confirm.
    transmat_cdf = np.cumsum(self.transmat[0, pstates_idx[0]], 1)

    # Initial hidden state (inverse-CDF draw)
    rand = random_state.rand()
    currstate = (startprob_cdf > rand).argmax()
    hidden_states = [currstate]
    obs = [self._generate_sample_from_state(currstate, pstates_idx[0], random_state)]

    for i in range(1, n_obs):
        rand = random_state.rand()

        if gen_pstates:
            # Advance the partial-state chain before sampling the hidden state
            currpstate = (ptransmat_cdf[currpstate] > rand).argmax()
            pstates_idx.append(currpstate)
            pstates.append(self.er[currpstate])

        # Hidden-state transition matrix conditioned on the pstate pair
        transmat_cdf = np.cumsum(self.transmat[pstates_idx[i - 1], pstates_idx[i]], 1)
        rand = random_state.rand()
        currstate = (transmat_cdf[currstate] > rand).argmax()
        hidden_states.append(currstate)
        obs.append(self._generate_sample_from_state(currstate, pstates_idx[i], random_state))

    return np.array(obs), np.array(pstates), np.array(hidden_states, dtype=int)
def score_df(self, df, pstate_col=PSTATE_COL):
    """Score a dataframe of observations; see :meth:`score`."""
    feature_cols = list(self.emission_name)
    observations = df[df.columns.difference([pstate_col])][feature_cols].values
    labels = df[pstate_col].values
    return self.score(observations, labels)
def score_events_df(self, df, pstate_col=PSTATE_COL, score_col='score'):
    """Per-event log-likelihoods, returned on a copy of ``df`` as a new
    ``score_col`` column; see :meth:`score_events`."""
    out = df.copy()
    feature_cols = list(self.emission_name)
    observations = out[out.columns.difference([pstate_col])][feature_cols].values
    labels = out[pstate_col].values
    out[score_col] = self.score_events(observations, labels)
    return out
def predict_states_df(self, df, pstate_col=PSTATE_COL, hstate_col=HSTATE_COL):
    """Viterbi-decode a dataframe, adding the hidden states on a copy of
    ``df`` as a new ``hstate_col`` column; see :meth:`predict_states`."""
    out = df.copy()
    feature_cols = list(self.emission_name)
    observations = out[out.columns.difference([pstate_col])][feature_cols].values
    labels = out[pstate_col].values
    _, out[hstate_col] = self.predict_states(observations, labels)
    return out
def predict_df(self, df, next_pstate=None, pstate_col=PSTATE_COL):
    """Predict the next observation from a dataframe history;
    see :meth:`predict`."""
    feature_cols = list(self.emission_name)
    observations = df[df.columns.difference([pstate_col])][feature_cols].values
    labels = df[pstate_col].values
    return self.predict(observations, labels, next_pstate)
def sample_df(self, pstates=None, n_obs=None, random_state=None, pstate_col=PSTATE_COL, hstate_col=HSTATE_COL):
    """
    Convenience function to generate samples from the model and create a dataframe

    Columns are ordered: pstate, hstate (each optional, skipped when the
    corresponding column name is None), then one column per emission
    feature.  Re-raises the ImportError if pandas is not installed.
    """
    try:
        import pandas as pd
    except Exception as e:
        raise e

    obs, pstates, hstates = self.sample(pstates, n_obs, random_state)

    items = []
    if pstate_col is not None:
        items.append((pstate_col, pstates))
    if hstate_col is not None:
        items.append((hstate_col, hstates))
    items = items + [(self.emission_name[i], obs[:, i]) for i in range(self.n_features)]

    # BUGFIX: pd.DataFrame.from_items was deprecated in pandas 0.23 and
    # removed in 1.0.  Build the frame from a dict and pass ``columns``
    # explicitly to preserve the intended column order on all versions.
    df = pd.DataFrame(dict(items), columns=[name for name, _ in items])
    return df
def __str__(self):
    """Return a human-readable multi-section dump of all model parameters.

    In the output, '.' denotes the marginal/unknown partial state stored at
    index 0 of every parameter array.
    """
    pstates = sorted(self.e.keys())

    # Section separators
    sep = '-' * 80 + '\n'
    sep2 = '_' * 40 + '\n'

    out = 'POHMM\n'
    out += 'H-states: %d\n' % self.n_hidden_states
    out += 'P-states: (%d) %s\n' % (len(pstates), str(pstates))
    out += 'Emission: %s\n' % (self.emission_distr)

    out += sep
    out += 'Transition probas\n'
    out += sep2
    out += '. -> .\n%s\n' % str(self.transmat[0, 0])
    for pstate in pstates:
        out += sep2
        out += '%s -> .\n%s\n' % (pstate, str(self.transmat[self.e[pstate], 0]))
        out += sep2
        out += '. -> %s\n%s\n' % (pstate, str(self.transmat[0, self.e[pstate]]))
    # All pstate-pair transition matrices
    for pstate1, pstate2 in product(pstates, pstates):
        out += sep2
        out += '%s -> %s\n%s\n' % (pstate1, pstate2, str(self.transmat[self.e[pstate1], self.e[pstate2]]))

    out += sep
    out += 'Starting probas\n'
    out += '.: %s\n' % str(self.startprob[0])
    for pstate in pstates:
        out += '%s: %s\n' % (pstate, str(self.startprob[self.e[pstate]]))

    out += sep
    out += 'Steady probas\n'
    out += '.: %s\n' % str(self.steadyprob[0])
    for pstate in pstates:
        out += '%s: %s\n' % (pstate, str(self.steadyprob[self.e[pstate]]))

    out += sep
    out += 'Emissions\n'
    for feature_name, feature_distr in zip(self.emission_name, self.emission_distr):
        out += sep2
        out += 'Feature %s: %s\n' % (feature_name, feature_distr)
        for param in _DISTRIBUTIONS[feature_distr]:
            out += '.: %s = %s\n' % (param, str(self.emission[feature_name][param][0]))
            for pstate in pstates:
                out += '%s: %s = %s\n' % (pstate, param, str(self.emission[feature_name][param][self.e[pstate]]))
    out += sep

    return out
def expected_value(self, feature=None, pstate=None, hstate=None, pstate_prob=None, hstate_prob=None):
    """
    Determine the joint maximum likelihood estimate

    Returns the expected value of ``feature`` under the emission
    distribution, marginalizing over the partial and/or hidden state
    (using steady-state probabilities) whenever they are not given.
    Returns None if the feature's distribution matches neither branch.
    """
    # Use the first feature by default
    if feature is None:
        feature = self.emission_name[0]

    # Will default to marginal pstate if pstate is unknown or None
    # (self.e is a defaultdict mapping unknown labels to index 0)
    pstate_idx = self.e[pstate]

    if pstate is not None and pstate_prob is not None:
        raise Exception('Must provide either pstate or pstate_proba but not both')

    if hstate is not None and hstate_prob is not None:
        raise Exception('Must provide either hstate or hstate_proba but not both')

    # Marginalize pstate using the steady state probas
    if pstate_prob is None:
        pstate_prob = self.pstate_steadyprob

    # Marginalize hstate using the steady state probas
    if hstate_prob is None:
        hstate_prob = self.steadyprob[pstate_idx]

    # NOTE(review): expected_lognormal is called as (sigma, mu) while
    # expected_normal is called as (mu, sigma) -- verify the helper
    # signatures match this ordering.
    if pstate is None and hstate is None:
        # Marginalize both pstate and hstate
        w = (pstate_prob[:, np.newaxis] * hstate_prob).flatten()
        if self.emission_name_distr[feature] == 'lognormal':
            return np.sum(w * expected_lognormal(self.emission[feature]['logsigma'].flatten(),
                                                 self.emission[feature]['logmu'].flatten()))
        elif self.emission_name_distr[feature] == 'normal':
            return np.sum(w * expected_normal(self.emission[feature]['mu'].flatten(),
                                              self.emission[feature]['sigma'].flatten()))
    elif hstate is None:
        # Marginalize hstate
        if self.emission_name_distr[feature] == 'lognormal':
            return np.sum(hstate_prob * expected_lognormal(self.emission[feature]['logsigma'][pstate_idx, :],
                                                           self.emission[feature]['logmu'][pstate_idx, :]))
        elif self.emission_name_distr[feature] == 'normal':
            return np.sum(hstate_prob * expected_normal(self.emission[feature]['mu'][pstate_idx, :],
                                                        self.emission[feature]['sigma'][pstate_idx, :]))
    elif pstate is None:
        # Marginalize pstate
        if self.emission_name_distr[feature] == 'lognormal':
            return np.sum(pstate_prob * expected_lognormal(self.emission[feature]['logsigma'][:, hstate],
                                                           self.emission[feature]['logmu'][:, hstate]))
        elif self.emission_name_distr[feature] == 'normal':
            return np.sum(pstate_prob * expected_normal(self.emission[feature]['mu'][:, hstate],
                                                        self.emission[feature]['sigma'][:, hstate]))
    else:
        # Both states known: no marginalization needed
        if self.emission_name_distr[feature] == 'lognormal':
            return expected_lognormal(self.emission[feature]['logsigma'][pstate_idx, hstate],
                                      self.emission[feature]['logmu'][pstate_idx, hstate])
        elif self.emission_name_distr[feature] == 'normal':
            return expected_normal(self.emission[feature]['mu'][pstate_idx, hstate],
                                   self.emission[feature]['sigma'][pstate_idx, hstate])

    # Unknown distribution: fall through and return None
    return
def pdf_fn(self, feature=None, pstate=None, hstate=None, pstate_prob=None, hstate_prob=None):
    """Return a callable probability-density function for ``feature``.

    The density is a mixture over the hidden (and possibly partial) states,
    weighted by the supplied or steady-state probabilities.  The returned
    function accepts a scalar or an array-like of evaluation points.

    NOTE(review): this mirrors :meth:`cdf_fn` almost line for line --
    candidates for a shared helper.
    """
    # Use the first feature by default
    if feature is None:
        feature = self.emission_name[0]

    # Will default to marginal pstate if pstate is unknown or None
    pstate_idx = self.e[pstate]

    if pstate is not None and pstate_prob is not None:
        raise Exception('Must provide either pstate or pstate_proba but not both')

    if hstate is not None and hstate_prob is not None:
        raise Exception('Must provide either hstate or hstate_proba but not both')

    # Marginalize pstate using the steady state probas
    if pstate_prob is None:
        pstate_prob = self.pstate_steadyprob

    # Marginalize hstate using the steady state probas
    if hstate_prob is None:
        hstate_prob = self.steadyprob[pstate_idx]

    if pstate is None and hstate is None:
        # Marginalize both pstate and hstate
        w = (pstate_prob[:, np.newaxis] * hstate_prob).flatten()
        if self.emission_name_distr[feature] == 'lognormal':
            pdf = lambda x: stats.lognorm.pdf(x, self.emission[feature]['logsigma'].flatten(), 0,
                                              np.exp(self.emission[feature]['logmu'].flatten()))
        elif self.emission_name_distr[feature] == 'normal':
            pdf = lambda x: stats.norm.pdf(x, self.emission[feature]['mu'].flatten(),
                                           self.emission[feature]['sigma'].flatten())
    elif hstate is None:
        # Marginalize hstate
        w = hstate_prob
        if self.emission_name_distr[feature] == 'lognormal':
            pdf = lambda x: stats.lognorm.pdf(x, self.emission[feature]['logsigma'][pstate_idx, :], 0,
                                              np.exp(self.emission[feature]['logmu'][pstate_idx, :]))
        elif self.emission_name_distr[feature] == 'normal':
            pdf = lambda x: stats.norm.pdf(x, self.emission[feature]['mu'][pstate_idx, :],
                                           self.emission[feature]['sigma'][pstate_idx, :])
    elif pstate is None:
        # Marginalize pstate
        w = pstate_prob
        if self.emission_name_distr[feature] == 'lognormal':
            pdf = lambda x: stats.lognorm.pdf(x, self.emission[feature]['logsigma'][:, hstate], 0,
                                              np.exp(self.emission[feature]['logmu'][:, hstate]))
        elif self.emission_name_distr[feature] == 'normal':
            pdf = lambda x: stats.norm.pdf(x, self.emission[feature]['mu'][:, hstate],
                                           self.emission[feature]['sigma'][:, hstate])
    else:
        # Both states known: single mixture component with unit weight
        w = 1
        if self.emission_name_distr[feature] == 'lognormal':
            pdf = lambda x: stats.lognorm.pdf(x, self.emission[feature]['logsigma'][pstate_idx, hstate], 0,
                                              np.exp(self.emission[feature]['logmu'][pstate_idx, hstate]))
        elif self.emission_name_distr[feature] == 'normal':
            pdf = lambda x: stats.norm.pdf(x, self.emission[feature]['mu'][pstate_idx, hstate],
                                           self.emission[feature]['sigma'][pstate_idx, hstate])

    def fn(x):
        # Mixture density: weighted sum of the per-component densities.
        if np.isscalar(x):
            p = np.sum(w * pdf(x))
        else:
            x = np.array(x)
            p = np.zeros(len(x))
            for i, xi in enumerate(x):
                p[i] = np.sum(w * pdf(xi))
        return p

    return fn
def cdf_fn(self, feature=None, pstate=None, hstate=None, pstate_prob=None, hstate_prob=None):
    """Return a callable cumulative-distribution function for ``feature``.

    The CDF is a mixture over the hidden (and possibly partial) states,
    weighted by the supplied or steady-state probabilities.  The returned
    function accepts a scalar or an array-like of evaluation points.

    NOTE(review): this mirrors :meth:`pdf_fn` almost line for line --
    candidates for a shared helper.
    """
    # Use the first feature by default
    if feature is None:
        feature = self.emission_name[0]

    # Will default to marginal pstate if pstate is unknown or None
    pstate_idx = self.e[pstate]

    if pstate is not None and pstate_prob is not None:
        raise Exception('Must provide either pstate or pstate_proba but not both')

    if hstate is not None and hstate_prob is not None:
        raise Exception('Must provide either hstate or hstate_proba but not both')

    # Marginalize pstate using the steady state probas
    if pstate_prob is None:
        pstate_prob = self.pstate_steadyprob

    # Marginalize hstate using the steady state probas
    if hstate_prob is None:
        hstate_prob = self.steadyprob[pstate_idx]

    if pstate is None and hstate is None:
        # Marginalize both pstate and hstate
        w = (pstate_prob[:, np.newaxis] * hstate_prob).flatten()
        if self.emission_name_distr[feature] == 'lognormal':
            cdf = lambda x: stats.lognorm.cdf(x, self.emission[feature]['logsigma'].flatten(), 0,
                                              np.exp(self.emission[feature]['logmu'].flatten()))
        elif self.emission_name_distr[feature] == 'normal':
            cdf = lambda x: stats.norm.cdf(x, self.emission[feature]['mu'].flatten(),
                                           self.emission[feature]['sigma'].flatten())
    elif hstate is None:
        # Marginalize hstate
        w = hstate_prob
        if self.emission_name_distr[feature] == 'lognormal':
            cdf = lambda x: stats.lognorm.cdf(x, self.emission[feature]['logsigma'][pstate_idx, :], 0,
                                              np.exp(self.emission[feature]['logmu'][pstate_idx, :]))
        elif self.emission_name_distr[feature] == 'normal':
            cdf = lambda x: stats.norm.cdf(x, self.emission[feature]['mu'][pstate_idx, :],
                                           self.emission[feature]['sigma'][pstate_idx, :])
    elif pstate is None:
        # Marginalize pstate
        w = pstate_prob
        if self.emission_name_distr[feature] == 'lognormal':
            cdf = lambda x: stats.lognorm.cdf(x, self.emission[feature]['logsigma'][:, hstate], 0,
                                              np.exp(self.emission[feature]['logmu'][:, hstate]))
        elif self.emission_name_distr[feature] == 'normal':
            cdf = lambda x: stats.norm.cdf(x, self.emission[feature]['mu'][:, hstate],
                                           self.emission[feature]['sigma'][:, hstate])
    else:
        # Both states known: single mixture component with unit weight
        w = 1
        if self.emission_name_distr[feature] == 'lognormal':
            cdf = lambda x: stats.lognorm.cdf(x, self.emission[feature]['logsigma'][pstate_idx, hstate], 0,
                                              np.exp(self.emission[feature]['logmu'][pstate_idx, hstate]))
        elif self.emission_name_distr[feature] == 'normal':
            cdf = lambda x: stats.norm.cdf(x, self.emission[feature]['mu'][pstate_idx, hstate],
                                           self.emission[feature]['sigma'][pstate_idx, hstate])

    def fn(x):
        # Mixture CDF: weighted sum of the per-component CDFs.
        if np.isscalar(x):
            p = np.sum(w * cdf(x))
        else:
            x = np.array(x)
            p = np.zeros(len(x))
            for i, xi in enumerate(x):
                p[i] = np.sum(w * cdf(xi))
        return p

    return fn
def params(self, pstates=None):
    """Flatten the model parameters into a 1d vector.

    Emission parameters come first (ordered by hidden state, then pstate,
    then feature/parameter), followed by the hidden-state self-transition
    diagonals per pstate.
    """
    if pstates is None:
        # TODO: self.e caches any unknown value, maybe it shouldn't?
        pstates = [None] + sorted(set(self.er.values()))

    out = []
    # emission parameters
    for h, label in product(range(self.n_hidden_states), pstates):
        p = self.e[label]
        for feat, distr in zip(self.emission_name, self.emission_distr):
            out.extend(self.emission[feat][name][p, h] for name in _DISTRIBUTIONS[distr])
    # transition parameters, diagonals only assuming 2 state
    for h, label in product(range(self.n_hidden_states), pstates):
        p = self.e[label]
        out.append(self.transmat[p, p, h, h])
    return np.array(out)
|
vmonaco/pohmm | pohmm/pohmm.py | Pohmm.sample_df | python | def sample_df(self, pstates=None, n_obs=None, random_state=None, pstate_col=PSTATE_COL, hstate_col=HSTATE_COL):
try:
import pandas as pd
except Exception as e:
raise e
obs, pstates, hstates = self.sample(pstates, n_obs, random_state)
items = []
if pstate_col is not None:
items.append((pstate_col, pstates))
if hstate_col is not None:
items.append((hstate_col, hstates))
items = items + [(self.emission_name[i], obs[:, i]) for i in range(self.n_features)]
df = pd.DataFrame.from_items(items)
return df | Convenience function to generate samples a model and create a dataframe | train | https://github.com/vmonaco/pohmm/blob/c00f8a62d3005a171d424549a55d46c421859ae9/pohmm/pohmm.py#L919-L939 | [
"def sample(self, pstates=None, n_obs=None, random_state=None):\n \"\"\"\n\n \"\"\"\n random_state = check_random_state(random_state)\n\n if pstates is None and n_obs is None:\n raise Exception('Must provide either pstates or n_obs')\n\n if pstates is not None and n_obs is not None:\n raise Exception('Must provide either pstates or n_obs but not both')\n\n gen_pstates = False\n rand = random_state.rand()\n if pstates is None:\n gen_pstates = True\n pstartprob_cdf = np.cumsum(self.pstate_startprob)\n ptransmat_cdf = np.cumsum(self.pstate_transmat, 1)\n\n # Initial pstate\n currpstate = (pstartprob_cdf > rand).argmax()\n pstates_idx = [currpstate]\n pstates = [self.er[currpstate]]\n else:\n n_obs = len(pstates)\n pstates_idx = np.array([self.e[p] for p in pstates])\n\n startprob_pdf = self.startprob[pstates_idx[0]]\n startprob_cdf = np.cumsum(startprob_pdf)\n transmat_cdf = np.cumsum(self.transmat[0, pstates_idx[0]], 1)\n\n # Initial hidden state\n rand = random_state.rand()\n currstate = (startprob_cdf > rand).argmax()\n hidden_states = [currstate]\n obs = [self._generate_sample_from_state(currstate, pstates_idx[0], random_state)]\n\n for i in range(1, n_obs):\n rand = random_state.rand()\n\n if gen_pstates:\n currpstate = (ptransmat_cdf[currpstate] > rand).argmax()\n pstates_idx.append(currpstate)\n pstates.append(self.er[currpstate])\n\n transmat_cdf = np.cumsum(self.transmat[pstates_idx[i - 1], pstates_idx[i]], 1)\n rand = random_state.rand()\n currstate = (transmat_cdf[currstate] > rand).argmax()\n hidden_states.append(currstate)\n obs.append(self._generate_sample_from_state(currstate, pstates_idx[i], random_state))\n\n return np.array(obs), np.array(pstates), np.array(hidden_states, dtype=int)\n"
] | class Pohmm(object):
"""
Partially observable hidden Markov model
"""
def __init__(self,
             n_hidden_states=2,
             emissions=None,
             max_iter=1000,
             thresh=1e-6,
             init_method='obs',
             init_spread=2,
             smoothing=None,
             random_state=None):
    """
    Partially observable hidden Markov model.

    Parameters
    ----------
    n_hidden_states : int
        Number of hidden states per partial state.
    emissions : list of str or list of (name, distribution) tuples
        Emission feature definitions; each distribution must be a key of
        ``_DISTRIBUTIONS``.  Defaults to a single 'normal' feature.
    max_iter : int
        Maximum number of EM iterations.
    thresh : float
        Convergence threshold on the log-likelihood improvement.
    init_method : str
        One of ``_INIT_METHODS`` ('obs' and 'rand' are handled by fit).
    init_spread : number
        Spread of the initial per-hidden-state means, in std-dev units.
    smoothing : None, str or dict
        Parameter-smoothing specification; normalized here to a dict keyed
        by 'transmat', 'startprob' and '<feature>:<param>'.
    random_state : int, RandomState or None
        Source of randomness for initialization and sampling.
    """
    # BUGFIX: avoid a mutable default argument for ``emissions``.
    if emissions is None:
        emissions = ['normal']

    if type(n_hidden_states) is int:
        self.n_hidden_states = n_hidden_states
    else:
        raise Exception('Wrong type for n_hidden_states. Must be int')

    if type(emissions[0]) is tuple:
        emission_name, emission_distr = zip(*emissions)
    elif type(emissions[0]) is str:
        # Unnamed features get integer names
        emission_name, emission_distr = np.arange(len(emissions)), emissions

    for distr in emission_distr:
        if distr not in _DISTRIBUTIONS.keys():
            raise ValueError('Emission distribution must be one of', _DISTRIBUTIONS.keys())

    self.emission_name = emission_name
    self.emission_distr = emission_distr
    self.emission_name_distr = dict(zip(emission_name, emission_distr))
    self.n_features = len(emissions)

    # Set up the emission parameters
    # emission: {'feature':{'param': np.array(shape=(n_partial_states, n_hidden_states))}}
    self.emission = defaultdict(dict)
    for name, distr in zip(self.emission_name, self.emission_distr):
        for param in _DISTRIBUTIONS[distr]:
            self.emission[name][param] = None
    self.emission = dict(self.emission)

    assert max_iter >= 0
    assert thresh >= 0
    self.max_iter = max_iter
    self.thresh = thresh

    assert init_spread >= 0
    self.init_spread = init_spread

    if init_method not in _INIT_METHODS:
        raise ValueError('init_method must be one of', _INIT_METHODS)
    self.init_method = init_method

    # Normalize the smoothing spec to a dict keyed by 'feature:param'
    if smoothing is None:
        smoothing = {'transmat': None, 'startprob': None}
        for name, distr in zip(self.emission_name, self.emission_distr):
            for param in _DISTRIBUTIONS[distr]:
                smoothing['%s:%s' % (name, param)] = None
    elif type(smoothing) is str:
        # A single strategy name applies to every parameter
        s = smoothing
        smoothing = {'transmat': s, 'startprob': s}
        for name, distr in zip(self.emission_name, self.emission_distr):
            for param in _DISTRIBUTIONS[distr]:
                smoothing['%s:%s' % (name, param)] = s
    elif type(smoothing) is dict:
        assert 'transmat' in smoothing.keys()
        assert 'startprob' in smoothing.keys()
        for name, distr in zip(self.emission_name, self.emission_distr):
            for param in _DISTRIBUTIONS[distr]:
                assert param in smoothing.keys() or '%s:%s' % (name, param) in smoothing.keys()
                # A bare param key is expanded to every feature
                if param in smoothing.keys() and '%s:%s' % (name, param) not in smoothing.keys():
                    smoothing['%s:%s' % (name, param)] = smoothing[param]
    else:
        raise Exception('Wrong type for smoothing. Must be None, str, or dict')

    self.smoothing = smoothing

    self.random_state = random_state

    # Number of unique partial states is unknown until fit
    self.n_partial_states = None

    # Results after fitting the model
    self.logprob_ = None
    self.n_iter_performed_ = None
    self.logprob_delta_ = None

    # Mapping between p-states and a unique index
    # Defaults to 0 for unknown or missing p-states
    self.e = defaultdict(int)
def _get_startprob(self):
    # Stored internally in log space; exponentiate on access.
    return np.exp(self._log_startprob)

def _set_startprob(self, startprob):
    """Validate and store the hidden-state starting probabilities,
    shape (n_partial_states, n_hidden_states); None means uniform."""
    if startprob is None:
        startprob = np.ones(shape=(self.n_partial_states, self.n_hidden_states)) / self.n_hidden_states
    else:
        # BUGFIX: the np.float alias was removed in numpy 1.24; the builtin
        # float (float64) is the documented replacement.
        startprob = np.asarray(startprob, dtype=float)

        # check if there exists a component whose value is exactly zero
        # if so, add a small number and re-normalize
        # (BUGFIX: np.alltrue was removed in numpy 2.0; np.all is equivalent)
        if not np.all(startprob):
            startprob = normalize(startprob, axis=1)

    if len(startprob) != self.n_partial_states:
        raise ValueError('startprob must have length n_partial_states')
    if not np.allclose(np.sum(startprob, axis=1), 1.0):
        raise ValueError('startprob must sum to 1.0')

    self._log_startprob = np.log(np.asarray(startprob).copy())

startprob = property(_get_startprob, _set_startprob)
def _get_steadyprob(self):
    # Stored internally in log space; exponentiate on access.
    return np.exp(self._log_steadyprob)

def _set_steadyprob(self, steadyprob):
    """Validate and store the hidden-state steady-state probabilities,
    shape (n_partial_states, n_hidden_states); None means uniform."""
    if steadyprob is None:
        steadyprob = np.ones(shape=(self.n_partial_states, self.n_hidden_states)) / self.n_hidden_states
    else:
        # BUGFIX: the np.float alias was removed in numpy 1.24; the builtin
        # float (float64) is the documented replacement.
        steadyprob = np.asarray(steadyprob, dtype=float)

        # check if there exists a component whose value is exactly zero
        # if so, add a small number and re-normalize
        # (BUGFIX: np.alltrue was removed in numpy 2.0; np.all is equivalent)
        if not np.all(steadyprob):
            steadyprob = normalize(steadyprob, axis=1)

    if len(steadyprob) != self.n_partial_states:
        raise ValueError('steadyprob must have length n_partial_states')
    if not np.allclose(np.sum(steadyprob, axis=1), 1.0):
        raise ValueError('steadyprob must sum to 1.0')

    self._log_steadyprob = np.log(np.asarray(steadyprob).copy())

steadyprob = property(_get_steadyprob, _set_steadyprob)
def _get_transmat(self):
    # Stored internally in log space; exponentiate on access.
    return np.exp(self._log_transmat)

def _set_transmat(self, transmat):
    """Validate and store the hidden-state transition probabilities, shape
    (n_partial_states, n_partial_states, n_hidden_states, n_hidden_states);
    None means uniform."""
    if transmat is None:
        transmat = np.ones(shape=(self.n_partial_states, self.n_partial_states, self.n_hidden_states,
                                  self.n_hidden_states)) / self.n_hidden_states

    # check if there exists a component whose value is exactly zero
    # if so, add a small number and re-normalize
    # (BUGFIX: np.alltrue was removed in numpy 2.0; np.all is equivalent)
    if not np.all(transmat):
        transmat = normalize(transmat, axis=3)

    if (np.asarray(transmat).shape
            != (self.n_partial_states, self.n_partial_states, self.n_hidden_states, self.n_hidden_states)):
        raise ValueError('transmat must have shape '
                         '(n_partial_states,n_partial_states,n_hidden_states,n_hidden_states)')
    if not np.all(np.allclose(np.sum(transmat, axis=3), 1.0)):
        raise ValueError('Rows of transmat must sum to 1.0')

    self._log_transmat = np.log(np.asarray(transmat).copy())
    # Map nan entries produced by log of invalid values to a well-defined
    # log-zero so downstream code never sees nan.
    underflow_idx = np.isnan(self._log_transmat)
    self._log_transmat[underflow_idx] = NEGINF

transmat = property(_get_transmat, _set_transmat)
def _compute_log_likelihood(self, obs, pstates_idx):
    """Per-frame emission log-likelihoods.

    Parameters
    ----------
    obs : array of shape (T, n_features)
    pstates_idx : int array of length T

    Returns
    -------
    array of shape (T, n_hidden_states): log p(obs_t | hstate, pstate_t),
    summed over the features (i.e. features treated as independent).
    """
    q = np.zeros(shape=(len(obs), self.n_hidden_states, self.n_features))

    for col, (feature_name, feature_distr) in enumerate(zip(self.emission_name, self.emission_distr)):
        if feature_distr == 'normal':
            mu = self.emission[feature_name]['mu'][pstates_idx]
            sigma = self.emission[feature_name]['sigma'][pstates_idx]
            for j in range(self.n_hidden_states):
                # Clamp the density at MIN_PROBA to avoid log(0)
                q[:, j, col] = np.log(
                    np.maximum(MIN_PROBA, stats.norm.pdf(obs[:, col], loc=mu[:, j], scale=sigma[:, j])))
        if feature_distr == 'lognormal':
            logmu = self.emission[feature_name]['logmu'][pstates_idx]
            logsigma = self.emission[feature_name]['logsigma'][pstates_idx]
            for j in range(self.n_hidden_states):
                q[:, j, col] = np.log(np.maximum(MIN_PROBA,
                                                 stats.lognorm.pdf(obs[:, col], logsigma[:, j], loc=0,
                                                                   scale=np.exp(logmu[:, j]))))
    # Combine features by summing their log-likelihoods
    q = q.sum(axis=2)
    return q
def _generate_sample_from_state(self, hidden_state, pstates_idx, random_state=None):
    """Draw one observation vector from the emission distributions of the
    given (hidden state, partial state) pair."""
    sample = np.zeros(self.n_features)
    for col, (feature_name, feature_distr) in enumerate(zip(self.emission_name, self.emission_distr)):
        if feature_distr == 'normal':
            mu = self.emission[feature_name]['mu'][pstates_idx][hidden_state]
            sigma = self.emission[feature_name]['sigma'][pstates_idx][hidden_state]
            sample[col] = stats.norm.rvs(loc=mu, scale=sigma, random_state=random_state)
        if feature_distr == 'lognormal':
            logmu = self.emission[feature_name]['logmu'][pstates_idx][hidden_state]
            logsigma = self.emission[feature_name]['logsigma'][pstates_idx][hidden_state]
            sample[col] = stats.lognorm.rvs(logsigma, loc=0, scale=np.exp(logmu), random_state=random_state)
    return sample
def _init_pstates(self, unique_pstates):
    """Assign each partial-state label a unique index (index 0 is reserved
    for the unknown/meta state) and build the reverse lookup table."""
    ordered = np.sort(unique_pstates)
    # Unseen labels fall through to index 0 via the defaultdict.
    self.e = defaultdict(int)
    self.e.update({label: i for i, label in enumerate(ordered, start=1)})
    self.er = {idx: label for label, idx in self.e.items()}  # Reverse lookup
    self.er[0] = UNKNOWN_PSTATE
    # One extra slot for the unknown state.
    self.n_partial_states = 1 + len(self.e)
    return
def _init_pstate_freqs(self, pstates_idx):
    """Count partial-state occurrence/transition frequencies and derive the
    partial-state starting, transition, and steady-state probabilities.

    Index 0 is the meta/unknown state and accumulates totals over all
    observed pstates.  NOTE(review): assumes self.pstate_startprob,
    self.pstate_transmat and self.pstate_steadyprob were already allocated
    (e.g. by _init_from_obs or _init_random, as done in fit).
    """
    # Partial state frequencies
    self.pstate_freq = Counter([idx for seq in pstates_idx for idx in seq])
    self.pstate_trans_freq = Counter([(idx1, idx2) for seq in pstates_idx for idx1, idx2 in zip(seq[:-1], seq[1:])])

    # Store freqs for the meta state
    self.pstate_freq[0] = len(np.concatenate(pstates_idx))

    for seq in pstates_idx:
        self.pstate_startprob[seq[0]] += 1
        self.pstate_steadyprob += np.bincount(seq, minlength=self.n_partial_states)
        for idx1, idx2 in zip(seq[:-1], seq[1:]):
            # Meta-state counters aggregate transitions from/to any pstate
            self.pstate_trans_freq[(0, 0)] += 1
            self.pstate_trans_freq[(idx1, 0)] += 1
            self.pstate_trans_freq[(0, idx2)] += 1
            self.pstate_transmat[idx1, idx2] += 1

    # TODO: separate probas from freqs
    # Normalize to get the probabilities, ignore the meta state at idx 0
    self.pstate_startprob[1:] = normalize(self.pstate_startprob[1:])
    self.pstate_transmat[1:, 1:] = normalize(self.pstate_transmat[1:, 1:], axis=1)
    self.pstate_steadyprob[1:] = normalize(self.pstate_steadyprob[1:])
    return
def _init_from_obs(self, obs, pstates_idx):
    """Initialize all model parameters from the observations themselves.

    Starting/transition probabilities are set uniform; emission parameters
    are seeded per pstate, with the hidden-state means of the FIRST feature
    spread ``init_spread`` standard deviations around the per-pstate mean
    (this fixes an ordering of the hidden states).
    """
    # Partial state probabilities
    self.pstate_startprob = np.zeros(self.n_partial_states)
    self.pstate_transmat = np.zeros((self.n_partial_states, self.n_partial_states))
    self.pstate_steadyprob = np.zeros(self.n_partial_states)

    # obs should be (N*T, n_features)
    # N is the number of samples
    # T is the size of each sample
    obs = np.concatenate(obs)
    pstates_idx = np.concatenate(pstates_idx)

    # Initialize starting and transition probas
    self.startprob = np.ones(shape=(self.n_partial_states, self.n_hidden_states)) / self.n_hidden_states
    self.steadyprob = np.ones(shape=(self.n_partial_states, self.n_hidden_states)) / self.n_hidden_states
    self.transmat = np.ones(shape=(self.n_partial_states, self.n_partial_states, self.n_hidden_states,
                                   self.n_hidden_states)) / self.n_hidden_states

    # Initialize emission parameters
    # Hidden states are ordered by the first feature
    feature1 = self.emission_name[0]
    for col, (feature_name, feature_distr) in enumerate(zip(self.emission_name, self.emission_distr)):
        if feature_distr == 'normal':
            self.emission[feature_name]['mu'] = np.zeros(shape=(self.n_partial_states, self.n_hidden_states))
            self.emission[feature_name]['sigma'] = np.zeros(shape=(self.n_partial_states, self.n_hidden_states))
            for idx in range(1, self.n_partial_states):
                idx_pstate = (pstates_idx == idx)
                # Fall back to all observations if this pstate never occurs
                if not np.any(idx_pstate):
                    idx_pstate = np.arange(len(pstates_idx))
                if feature_name == feature1:
                    self.emission[feature_name]['sigma'][idx, :] = np.maximum(obs[idx_pstate, col].std(), MIN_PROBA)
                    self.emission[feature_name]['mu'][idx] = obs[idx_pstate, col].mean() + \
                                                             obs[:, col].std() * np.linspace(
                        -self.init_spread, self.init_spread,
                        self.n_hidden_states)
                else:
                    self.emission[feature_name]['sigma'][idx, :] = np.maximum(obs[idx_pstate, col].std(), MIN_PROBA)
                    self.emission[feature_name]['mu'][idx, :] = obs[idx_pstate, col].mean()
        if feature_distr == 'lognormal':
            # Same scheme as 'normal', but on log-transformed observations
            self.emission[feature_name]['logmu'] = np.zeros(shape=(self.n_partial_states, self.n_hidden_states))
            self.emission[feature_name]['logsigma'] = np.zeros(shape=(self.n_partial_states, self.n_hidden_states))
            for idx in range(1, self.n_partial_states):
                idx_pstate = (pstates_idx == idx)
                if not np.any(idx_pstate):
                    idx_pstate = np.arange(len(pstates_idx))
                if feature_name == feature1:
                    self.emission[feature_name]['logsigma'][idx, :] = np.maximum(np.log(obs[idx_pstate, col]).std(),
                                                                                 MIN_PROBA)
                    self.emission[feature_name]['logmu'][idx] = np.log(obs[idx_pstate, col]).mean() + np.log(
                        obs[:, col]).std() * np.linspace(-self.init_spread, self.init_spread,
                                                         self.n_hidden_states)
                else:
                    self.emission[feature_name]['logsigma'][idx, :] = np.maximum(np.log(obs[idx_pstate, col]).std(),
                                                                                 MIN_PROBA)
                    self.emission[feature_name]['logmu'][idx] = np.log(obs[idx_pstate, col]).mean()
    return
    def _init_random(self, random_state=None):
        """Randomly initialize all model parameters.

        Draws the partial-state start/transition distributions and, for each
        (p-state, feature) pair, the emission parameters from the uniform
        ranges in ``_RANDINIT``. Index 0 of every p-state axis is the
        marginal ("unknown") state and is left for :meth:`_compute_marginals`
        to fill in, so only indexes ``1:`` are randomized here.

        Parameters
        ----------
        random_state : None, int or RandomState, optional
            Source of randomness; defaults to ``self.random_state``.
        """
        if random_state is None:
            random_state = self.random_state
        random_state = check_random_state(random_state)
        # Partial-state chain (row/col 0 reserved for the marginal state)
        self.pstate_startprob = np.zeros(self.n_partial_states)
        self.pstate_transmat = np.zeros((self.n_partial_states, self.n_partial_states))
        self.pstate_steadyprob = np.zeros(self.n_partial_states)
        self.pstate_startprob[1:] = gen_stochastic_matrix(size=self.n_partial_states - 1, random_state=random_state)
        self.pstate_transmat[1:, 1:] = gen_stochastic_matrix(
            size=(self.n_partial_states - 1, self.n_partial_states - 1), random_state=random_state)
        self.pstate_steadyprob[1:] = steadystate(self.pstate_transmat[1:, 1:])
        # Hidden-state start probabilities, one row per p-state
        self.startprob = gen_stochastic_matrix(size=(self.n_partial_states, self.n_hidden_states),
                                               random_state=random_state)
        # Hidden-state transition matrices, one per (p-state, p-state) pair
        transmat = np.zeros((self.n_partial_states, self.n_partial_states, self.n_hidden_states, self.n_hidden_states))
        for i, j in product(range(1, self.n_partial_states), range(1, self.n_partial_states)):
            transmat[i, j] = gen_stochastic_matrix(size=(self.n_hidden_states, self.n_hidden_states),
                                                   random_state=random_state)
        self.transmat = normalize(transmat, axis=3)
        # Initialize emission parameters
        for feature_name, feature_distr in zip(self.emission_name, self.emission_distr):
            if feature_distr == 'normal':
                self.emission[feature_name]['mu'] = random_state.uniform(*_RANDINIT[feature_distr]['mu'], size=(
                    self.n_partial_states, self.n_hidden_states))
                self.emission[feature_name]['sigma'] = random_state.uniform(*_RANDINIT[feature_distr]['sigma'], size=(
                    self.n_partial_states, self.n_hidden_states))
            if feature_distr == 'lognormal':
                self.emission[feature_name]['logmu'] = random_state.uniform(*_RANDINIT[feature_distr]['logmu'], size=(
                    self.n_partial_states, self.n_hidden_states))
                self.emission[feature_name]['logsigma'] = random_state.uniform(*_RANDINIT[feature_distr]['logsigma'],
                                                                               size=(self.n_partial_states,
                                                                                     self.n_hidden_states))
        # Sort the first feature's means so hidden states are ordered by it,
        # which fixes the otherwise-arbitrary hidden-state labeling.
        if self.emission_distr[0] == 'normal':
            self.emission[self.emission_name[0]]['mu'] = np.sort(self.emission[self.emission_name[0]]['mu'], axis=1)
        elif self.emission_distr[0] == 'lognormal':
            self.emission[self.emission_name[0]]['logmu'] = np.sort(self.emission[self.emission_name[0]]['logmu'],
                                                                    axis=1)
        return
def _smooth(self):
self._compute_marginals()
startprob = self.startprob
for j in range(1, self.n_partial_states):
if 'freq' == self.smoothing['startprob']:
w_ = 1 / (1 + self.pstate_freq[j])
w_j = 1 - w_
elif 'proba' == self.smoothing['startprob']:
w_j = self.pstate_steadyprob[j]
w_ = 1 - w_j
elif 'exp' == self.smoothing['startprob']:
w_ = np.exp(-self.pstate_freq[j])
w_j = 1 - w_
elif 'fixed' == self.smoothing['startprob']:
w_ = 1
w_j = 0
elif self.smoothing['startprob'] is None:
w_ = 0
w_j = 1
else:
raise Exception('Wrong smoothing for startprob: ' + self.smoothing['startprob'])
startprob[j] = w_j * self.startprob[j] + w_ * self.startprob[0]
self.startprob = startprob
transmat = self.transmat
for i, j in product(range(1, self.n_partial_states), range(1, self.n_partial_states)):
if 'freq' == self.smoothing['transmat']:
w_i0 = 1 / (1 + self.pstate_trans_freq[i, j] + self.pstate_trans_freq[0, j])
w_0j = 1 / (1 + self.pstate_trans_freq[i, j] + self.pstate_trans_freq[i, 0])
w_ij = 1 - (w_i0 + w_0j)
w_ = 0
elif 'proba' == self.smoothing['transmat']:
denom = self.pstate_transmat[i, j] + self.pstate_transmat[i, :].sum() + self.pstate_transmat[:, j].sum()
w_i0 = self.pstate_transmat[i, :].sum() / denom
w_0j = self.pstate_transmat[:, j].sum() / denom
w_ij = self.pstate_transmat[i, j] / denom
w_ = 0
elif 'exp' == self.smoothing['transmat']:
w_i0 = self.pstate_trans_freq[i, 0] * np.exp(-self.pstate_trans_freq[i, j])
w_0j = self.pstate_trans_freq[0, j] * np.exp(-self.pstate_trans_freq[i, j])
w_ = self.pstate_trans_freq[0, 0] * np.exp(
-(self.pstate_trans_freq[i, 0] + self.pstate_trans_freq[0, j]))
w_ij = self.pstate_trans_freq[i, j]
w_ij, w_i0, w_0j, w_ = normalize(np.array([w_ij, w_i0, w_0j, w_]))
elif 'fixed' == self.smoothing['transmat']:
w_i0 = 0
w_0j = 0
w_ = 1
w_ij = 0
elif self.smoothing['transmat'] is None:
w_i0 = 0
w_0j = 0
w_ = 0
w_ij = 1
else:
raise Exception('Wrong smoothing for transmat: ' + self.smoothing['transmat'])
assert (w_i0 + w_0j + w_ + w_ij) - 1 < TOLERANCE
transmat[i, j] = w_ij * self.transmat[i, j] + w_i0 * self.transmat[i, 0] + w_0j * self.transmat[0, j] + w_ * \
self.transmat[
0, 0]
self.transmat = transmat
assert np.all(self.startprob.sum(axis=1) - 1 < TOLERANCE)
assert np.all(self.steadyprob.sum(axis=1) - 1 < TOLERANCE)
assert np.all(self.transmat.sum(axis=3) - 1 < TOLERANCE)
for feature_name, feature_distr in zip(self.emission_name, self.emission_distr):
for param in _DISTRIBUTIONS[feature_distr]:
key = '%s:%s' % (feature_name, param)
for j in range(1, self.n_partial_states):
if 'freq' == self.smoothing[key]:
w_ = 1 / (1 + self.pstate_freq[j])
w_j = 1 - w_
elif 'proba' == self.smoothing[key]:
w_j = self.pstate_steadyprob[j]
w_ = 1 - w_j
elif 'exp' == self.smoothing[key]:
w_ = np.exp(-self.pstate_freq[j])
w_j = 1 - w_
elif 'fixed' == self.smoothing[key]:
w_ = 1
w_j = 0
elif self.smoothing[key] is None:
w_ = 0
w_j = 1
else:
raise Exception('Wrong smoothing for ' + key)
self.emission[feature_name][param][j] = w_j * self.emission[feature_name][param][j] + w_ * \
self.emission[
feature_name][
param][0]
return
def _initialize_sufficient_statistics(self):
stats = {
'nobs': 0,
'post': np.zeros((self.n_partial_states, self.n_hidden_states)),
'obs': np.zeros((self.n_partial_states, self.n_hidden_states, self.n_features)),
'obs**2': np.zeros((self.n_partial_states, self.n_hidden_states, self.n_features)),
'lnobs': np.zeros((self.n_partial_states, self.n_hidden_states, self.n_features)),
'lnobs**2': np.zeros((self.n_partial_states, self.n_hidden_states, self.n_features)),
'start': np.zeros((self.n_partial_states, self.n_hidden_states)),
'steady': np.zeros((self.n_partial_states, self.n_hidden_states)),
'trans': np.zeros(
(self.n_partial_states, self.n_partial_states, self.n_hidden_states, self.n_hidden_states))
}
return stats
    def _accumulate_sufficient_statistics(self, stats, obs, pstates_idx, framelogprob,
                                          posteriors, fwdlattice, bwdlattice):
        """Add one sequence's expected counts to the EM accumulators in ``stats``.

        Mutates ``stats`` (as produced by
        :meth:`_initialize_sufficient_statistics`) in place using the
        per-frame ``posteriors`` and the forward/backward lattices.
        """
        stats['nobs'] += 1
        n_observations, n_hidden_states = framelogprob.shape
        # Start counts: the marginal row 0 always takes the first posterior;
        # each p-state takes the posterior at its first occurrence in the sequence.
        stats['start'][0] += posteriors[0]
        for i in range(self.n_partial_states):
            if len(np.where(pstates_idx == i)[0]) > 0:
                stats['start'][i] += posteriors[np.where(pstates_idx == i)[0].min()]
        if n_observations > 1:
            # lneta[t, i, j] = log posterior of transitioning i -> j at step t
            lneta = np.zeros((n_observations - 1, n_hidden_states, n_hidden_states))
            _hmmc._compute_lneta(n_observations, n_hidden_states, pstates_idx, fwdlattice,
                                 self._log_transmat, bwdlattice, framelogprob,
                                 lneta)
            # Sum transition posteriors per observed (p-state_t, p-state_t+1) pair
            for i, j in unique_rows(np.c_[pstates_idx[:-1], pstates_idx[1:]]):
                if ((pstates_idx[:-1] == i) & (pstates_idx[1:] == j)).sum() > 0:
                    stats['trans'][i, j] += np.exp(
                        logsumexp(lneta[(pstates_idx[:-1] == i) & (pstates_idx[1:] == j)], axis=0))
        # Posterior-weighted first and second moments, grouped by p-state,
        # for both raw (normal) and log (lognormal) observations.
        for i in range(self.n_partial_states):
            stats['post'][i] += posteriors[pstates_idx == i].sum(axis=0)
            stats['obs'][i] += np.dot(posteriors[pstates_idx == i].T, obs[pstates_idx == i])
            stats['obs**2'][i] += np.dot(posteriors[pstates_idx == i].T, obs[pstates_idx == i] ** 2)
            stats['lnobs'][i] += np.dot(posteriors[pstates_idx == i].T, np.log(obs[pstates_idx == i]))
            stats['lnobs**2'][i] += np.dot(posteriors[pstates_idx == i].T, np.log(obs[pstates_idx == i]) ** 2)
        return
    def _compute_marginals(self):
        """Recompute the index-0 (marginal) parameters from the p-state-specific ones.

        Fills in ``startprob[0]``, ``steadyprob``, the transitions to/from
        the marginal state (``transmat[i, 0]``, ``transmat[0, j]``,
        ``transmat[0, 0]``) and the marginal emission parameters, all by
        averaging over the partial-state distribution.
        """
        # TODO: cythonize some of this
        # Start prob, weighted by p-state start probs
        self.startprob[0] = (self.pstate_startprob[1:, np.newaxis] * self.startprob[1:]).sum(axis=0)
        # Use the p-state transmat and transmat to get the full transmat
        full_transmat = ph2full(self.pstate_transmat[1:, 1:], self.transmat[1:, 1:])
        full_steadyprob = steadystate(full_transmat)
        # Steady state probas are determined by the full trans mat, need to be updated
        steadyprob = np.zeros(shape=(self.n_partial_states, self.n_hidden_states))
        steadyprob[0] = full_steadyprob.reshape(-1, self.n_hidden_states).sum(axis=0)
        for i in range(self.n_partial_states - 1):
            steadyprob[i + 1] = normalize(
                full_steadyprob[i * self.n_hidden_states:i * self.n_hidden_states + self.n_hidden_states])
        self.steadyprob = steadyprob
        # Update the transations to/from the marginal state
        transmat = self.transmat
        # Group the hidden states within each partial state
        for hidx1, hidx2 in product(range(self.n_hidden_states), range(self.n_hidden_states)):
            # full_transmat is laid out in blocks of n_hidden_states per p-state,
            # so stepping by n_hidden_states sums the same hidden index over all p-states.
            transmat[0, 0][hidx1, hidx2] = full_transmat[hidx1::self.n_hidden_states, hidx2::self.n_hidden_states].sum()
            for pidx in range(self.n_partial_states - 1):
                transmat[pidx + 1, 0][hidx1, hidx2] = full_transmat[pidx * self.n_hidden_states + hidx1,
                                                      hidx2::self.n_hidden_states].sum()
                transmat[0, pidx + 1][hidx1, hidx2] = full_transmat[hidx1::self.n_hidden_states,
                                                      pidx * self.n_hidden_states + hidx2].sum()
        self.transmat = normalize(transmat, axis=3)
        # P-state weights used to mix the emission parameters into index 0
        pweights = self.pstate_steadyprob[1:, np.newaxis]
        # Update emission parameters
        for feature_name, feature_distr in zip(self.emission_name, self.emission_distr):
            if feature_distr == 'normal':
                # Marginal state is a mixture of normals
                mu = self.emission[feature_name]['mu'][1:, :]
                sigma = self.emission[feature_name]['sigma'][1:, :]
                # Weighted mean and var (law of total variance for the mixture)
                mu_0 = (pweights * mu).sum(axis=0)
                self.emission[feature_name]['mu'][0, :] = mu_0
                self.emission[feature_name]['sigma'][0, :] = np.sqrt(
                    (pweights * ((mu - mu_0) ** 2 + sigma ** 2)).sum(axis=0))
            if feature_distr == 'lognormal':
                # Marginal state is a mixture of normals
                mu = self.emission[feature_name]['logmu'][1:, :]
                sigma = self.emission[feature_name]['logsigma'][1:, :]
                # Weighted mean and var (mixture applied in log space)
                mu_0 = (pweights * mu).sum(axis=0)
                self.emission[feature_name]['logmu'][0, :] = mu_0
                self.emission[feature_name]['logsigma'][0, :] = np.sqrt(
                    (pweights * ((mu - mu_0) ** 2 + sigma ** 2)).sum(axis=0))
        return
def _do_mstep(self, stats):
self.startprob = normalize(np.maximum(stats['start'], MIN_PROBA), axis=1)
self.transmat = normalize(np.maximum(stats['trans'], MIN_PROBA), axis=3)
for col, (feature_name, feature_distr) in enumerate(zip(self.emission_name, self.emission_distr)):
if feature_distr == 'normal':
denom = np.maximum(stats['post'], MIN_PROBA)
mu = stats['obs'][:, :, col] / denom
cv_num = (stats['obs**2'][:, :, col]
- 2 * mu * stats['obs'][:, :, col]
+ mu ** 2 * denom)
sigma = np.sqrt(cv_num / denom)
sigma[np.isnan(sigma)] = MIN_PROBA
self.emission[feature_name]['mu'] = mu
self.emission[feature_name]['sigma'] = np.maximum(sigma, MIN_PROBA)
if feature_distr == 'lognormal':
denom = np.maximum(stats['post'], MIN_PROBA)
mu = stats['lnobs'][:, :, col] / denom
cv_num = (stats['lnobs**2'][:, :, col]
- 2 * mu * stats['lnobs'][:, :, col]
+ mu ** 2 * denom)
sigma = np.sqrt(cv_num / denom)
sigma[np.isnan(sigma)] = MIN_PROBA
self.emission[feature_name]['logmu'] = mu
self.emission[feature_name]['logsigma'] = np.maximum(sigma, MIN_PROBA)
return
def _do_forward_pass(self, framelogprob, event_idx):
n_observations, n_hidden_states = framelogprob.shape
fwdlattice = np.zeros((n_observations, n_hidden_states))
_hmmc._forward(n_observations, n_hidden_states,
event_idx, self._log_startprob,
self._log_transmat, framelogprob, fwdlattice)
return logsumexp(fwdlattice[-1]), fwdlattice
def _do_backward_pass(self, framelogprob, event_idx):
n_observations, n_hidden_states = framelogprob.shape
bwdlattice = np.zeros((n_observations, n_hidden_states))
_hmmc._backward(n_observations, n_hidden_states,
event_idx, self._log_startprob,
self._log_transmat, framelogprob, bwdlattice)
return bwdlattice
def _do_viterbi_pass(self, framelogprob, event_idx):
n_observations, n_components = framelogprob.shape
state_sequence, logprob = _hmmc._viterbi(
n_observations, n_components,
event_idx, self._log_startprob,
self._log_transmat, framelogprob)
return logprob, state_sequence
    def rand(self, unique_pstates, random_state=None):
        """
        Randomize the POHMM parameters

        Initializes the partial-state labels, draws random parameters and
        fills in the marginal (index-0) parameters from them.

        Parameters
        ----------
        unique_pstates : iterable
            The set of partial-state labels the model should support.
        random_state : None, int or RandomState, optional
            Source of randomness; defaults to ``self.random_state``.

        Returns
        -------
        self
        """
        self._init_pstates(unique_pstates)
        self._init_random(random_state=random_state)
        self._compute_marginals()
        return self
    def fit(self, obs, pstates, unique_pstates=None):
        """
        Estimate model parameters.

        Runs Baum-Welch (EM) until the log-likelihood improvement falls
        below ``self.thresh`` or ``self.max_iter`` iterations elapse.

        Parameters
        ----------
        obs : list of 2d array-like
            One (n_events, n_features) observation matrix per sequence.
        pstates : list of 1d array-like
            Partial-state label sequence aligned with each ``obs`` entry.
        unique_pstates : iterable, optional
            Explicit label set; inferred from ``pstates`` when omitted.

        Returns
        -------
        self
        """
        obs = [np.array(o) for o in obs]
        pstates = [np.array(p) for p in pstates]
        # List or array of observation sequences
        assert len(obs) == len(pstates)
        assert obs[0].ndim == 2
        assert pstates[0].ndim == 1
        if unique_pstates is not None:
            self._init_pstates(unique_pstates)
        else:
            self._init_pstates(list(set(np.concatenate(pstates))))
        # Map the partial states to a unique index
        pstates_idx = [np.array([self.e[p] for p in seq]) for seq in pstates]
        if self.init_method == 'rand':
            self._init_random()
        elif self.init_method == 'obs':
            self._init_from_obs(obs, pstates_idx)
        self._init_pstate_freqs(pstates_idx)
        self._smooth()
        logprob = []
        for i in range(self.max_iter):
            # Expectation step
            stats = self._initialize_sufficient_statistics()
            curr_logprob = 0
            for obs_i, pstates_idx_i in zip(obs, pstates_idx):
                framelogprob = self._compute_log_likelihood(obs_i, pstates_idx_i)
                lpr, fwdlattice = self._do_forward_pass(framelogprob, pstates_idx_i)
                bwdlattice = self._do_backward_pass(framelogprob, pstates_idx_i)
                # State posteriors gamma, normalized per frame in log space
                gamma = fwdlattice + bwdlattice
                posteriors = np.exp(gamma.T - logsumexp(gamma, axis=1)).T
                curr_logprob += lpr
                self._accumulate_sufficient_statistics(stats, obs_i,
                                                       pstates_idx_i, framelogprob, posteriors, fwdlattice, bwdlattice)
            logprob.append(curr_logprob)
            self.logprob_ = curr_logprob
            # Check for convergence.
            self.n_iter_performed_ = i
            if i > 0:
                self.logprob_delta = logprob[-1] - logprob[-2]
                if self.logprob_delta < self.thresh:
                    break
            # Maximization step
            self._do_mstep(stats)
            # Mix the parameters
            self._smooth()
        return self
def score(self, obs, pstates):
"""
Compute the log probability under the model.
"""
pstates_idx = np.array([self.e[p] for p in pstates])
framelogprob = self._compute_log_likelihood(obs, pstates_idx)
logprob, _ = self._do_forward_pass(framelogprob, pstates_idx)
return logprob
def score_events(self, obs, pstates):
"""
Compute the log probability of each event under the model.
"""
pstates_idx = np.array([self.e[p] for p in pstates])
framelogprob = self._compute_log_likelihood(obs, pstates_idx)
_, fwdlattice = self._do_forward_pass(framelogprob, pstates_idx)
L = logsumexp(fwdlattice, axis=1)
return np.concatenate([L[[0]], np.diff(L)])
def predict_states(self, obs, pstates):
pstates_idx = np.array([self.e[p] for p in pstates])
framelogprob = self._compute_log_likelihood(obs, pstates_idx)
viterbi_logprob, state_sequence = self._do_viterbi_pass(framelogprob, pstates_idx)
return viterbi_logprob, state_sequence
def predict(self, obs, pstates, next_pstate=None):
"""
Predict the next observation
"""
assert len(obs) == len(pstates)
pstates_idx = np.array([self.e[ei] for ei in pstates])
next_pstate_idx = self.e[next_pstate]
if len(obs) == 0:
# No history, use the starting probas
next_hstate_prob = self.startprob[next_pstate_idx]
else:
# With a history, determine the hidden state posteriors using
# the last posteriors and transition matrix
framelogprob = self._compute_log_likelihood(obs, pstates_idx)
_, fwdlattice = self._do_forward_pass(framelogprob, pstates_idx)
next_hstate_prob = np.zeros(self.n_hidden_states)
alpha_n = fwdlattice[-1]
vmax = alpha_n.max(axis=0)
alpha_n = np.exp(alpha_n - vmax)
alpha_n = alpha_n / alpha_n.sum()
trans = self.transmat[pstates_idx[-1], next_pstate_idx]
for i in range(self.n_hidden_states):
next_hstate_prob[i] = np.sum([alpha_n[j] * trans[j, i] for j in range(self.n_hidden_states)])
assert next_hstate_prob.sum() - 1 < TOLERANCE
# Make the prediction
prediction = np.array(
[self.expected_value(feature, pstate=next_pstate, hstate_prob=next_hstate_prob) for feature in
self.emission_name])
# next_hstate = np.argmax(next_hstate_prob)
# prediction = np.array(
# [self.expected_value(feature, pstate=next_pstate, hstate=next_hstate) for feature in
# self.emission_name])
return prediction
def gen_pstates_idx(self, n, random_state=None):
if random_state is None:
random_state = self.random_state
random_state = check_random_state(random_state)
startprob_cdf = np.cumsum(self.pstate_startprob)
transmat_cdf = np.cumsum(self.pstate_transmat, 1)
# Initial state.
rand = random_state.rand()
curr_pstate = (startprob_cdf > rand).argmax()
pstates = [curr_pstate]
for _ in range(1, n):
rand = random_state.rand()
curr_pstate = (transmat_cdf[curr_pstate] > rand).argmax()
pstates.append(curr_pstate)
return np.array(pstates, dtype=int)
    def sample(self, pstates=None, n_obs=None, random_state=None):
        """
        Generate a random sample from the model.

        Exactly one of ``pstates`` (an explicit p-state label sequence) or
        ``n_obs`` (sample the p-states from the p-state chain as well) must
        be given.

        Returns
        -------
        (obs, pstates, hidden_states) : tuple of ndarrays
        """
        random_state = check_random_state(random_state)
        if pstates is None and n_obs is None:
            raise Exception('Must provide either pstates or n_obs')
        if pstates is not None and n_obs is not None:
            raise Exception('Must provide either pstates or n_obs but not both')
        gen_pstates = False
        # NOTE: this draw is consumed only in the gen_pstates branch below,
        # but it is taken unconditionally, so the RNG stream differs between
        # the two modes by design of the original implementation.
        rand = random_state.rand()
        if pstates is None:
            gen_pstates = True
            pstartprob_cdf = np.cumsum(self.pstate_startprob)
            ptransmat_cdf = np.cumsum(self.pstate_transmat, 1)
            # Initial pstate
            currpstate = (pstartprob_cdf > rand).argmax()
            pstates_idx = [currpstate]
            pstates = [self.er[currpstate]]
        else:
            n_obs = len(pstates)
            pstates_idx = np.array([self.e[p] for p in pstates])
        startprob_pdf = self.startprob[pstates_idx[0]]
        startprob_cdf = np.cumsum(startprob_pdf)
        transmat_cdf = np.cumsum(self.transmat[0, pstates_idx[0]], 1)
        # Initial hidden state
        rand = random_state.rand()
        currstate = (startprob_cdf > rand).argmax()
        hidden_states = [currstate]
        obs = [self._generate_sample_from_state(currstate, pstates_idx[0], random_state)]
        for i in range(1, n_obs):
            rand = random_state.rand()
            if gen_pstates:
                # Extend the p-state sequence one step at a time
                currpstate = (ptransmat_cdf[currpstate] > rand).argmax()
                pstates_idx.append(currpstate)
                pstates.append(self.er[currpstate])
            # Hidden-state transition matrix for the current p-state pair
            transmat_cdf = np.cumsum(self.transmat[pstates_idx[i - 1], pstates_idx[i]], 1)
            rand = random_state.rand()
            currstate = (transmat_cdf[currstate] > rand).argmax()
            hidden_states.append(currstate)
            obs.append(self._generate_sample_from_state(currstate, pstates_idx[i], random_state))
        return np.array(obs), np.array(pstates), np.array(hidden_states, dtype=int)
def fit_df(self, dfs, pstate_col=PSTATE_COL):
"""
Convenience function to fit a model from a list of dataframes
"""
obs_cols = list(self.emission_name)
obs = [df[df.columns.difference([pstate_col])][obs_cols].values for df in dfs]
pstates = [df[pstate_col].values for df in dfs]
return self.fit(obs, pstates)
def score_df(self, df, pstate_col=PSTATE_COL):
"""
"""
obs_cols = list(self.emission_name)
obs = df[df.columns.difference([pstate_col])][obs_cols].values
pstates = df[pstate_col].values
return self.score(obs, pstates)
def score_events_df(self, df, pstate_col=PSTATE_COL, score_col='score'):
"""
"""
df = df.copy()
obs_cols = list(self.emission_name)
obs = df[df.columns.difference([pstate_col])][obs_cols].values
pstates = df[pstate_col].values
df[score_col] = self.score_events(obs, pstates)
return df
def predict_states_df(self, df, pstate_col=PSTATE_COL, hstate_col=HSTATE_COL):
df = df.copy()
obs_cols = list(self.emission_name)
obs = df[df.columns.difference([pstate_col])][obs_cols].values
pstates = df[pstate_col].values
_, df[hstate_col] = self.predict_states(obs, pstates)
return df
def predict_df(self, df, next_pstate=None, pstate_col=PSTATE_COL):
obs_cols = list(self.emission_name)
obs = df[df.columns.difference([pstate_col])][obs_cols].values
pstates = df[pstate_col].values
return self.predict(obs, pstates, next_pstate)
    def __str__(self):
        """Return a human-readable multi-section dump of all model parameters.

        Sections: header, transition probabilities (marginal '.' and every
        p-state pair), starting probabilities, steady-state probabilities,
        and emission parameters per feature.
        """
        pstates = sorted(self.e.keys())
        sep = '-' * 80 + '\n'
        sep2 = '_' * 40 + '\n'
        out = 'POHMM\n'
        out += 'H-states: %d\n' % self.n_hidden_states
        out += 'P-states: (%d) %s\n' % (len(pstates), str(pstates))
        out += 'Emission: %s\n' % (self.emission_distr)
        out += sep
        out += 'Transition probas\n'
        out += sep2
        # '.' denotes the marginal (index-0) state throughout
        out += '. -> .\n%s\n' % str(self.transmat[0, 0])
        for pstate in pstates:
            out += sep2
            out += '%s -> .\n%s\n' % (pstate, str(self.transmat[self.e[pstate], 0]))
            out += sep2
            out += '. -> %s\n%s\n' % (pstate, str(self.transmat[0, self.e[pstate]]))
        for pstate1, pstate2 in product(pstates, pstates):
            out += sep2
            out += '%s -> %s\n%s\n' % (pstate1, pstate2, str(self.transmat[self.e[pstate1], self.e[pstate2]]))
        out += sep
        out += 'Starting probas\n'
        out += '.: %s\n' % str(self.startprob[0])
        for pstate in pstates:
            out += '%s: %s\n' % (pstate, str(self.startprob[self.e[pstate]]))
        out += sep
        out += 'Steady probas\n'
        out += '.: %s\n' % str(self.steadyprob[0])
        for pstate in pstates:
            out += '%s: %s\n' % (pstate, str(self.steadyprob[self.e[pstate]]))
        out += sep
        out += 'Emissions\n'
        for feature_name, feature_distr in zip(self.emission_name, self.emission_distr):
            out += sep2
            out += 'Feature %s: %s\n' % (feature_name, feature_distr)
            for param in _DISTRIBUTIONS[feature_distr]:
                out += '.: %s = %s\n' % (param, str(self.emission[feature_name][param][0]))
                for pstate in pstates:
                    out += '%s: %s = %s\n' % (pstate, param, str(self.emission[feature_name][param][self.e[pstate]]))
        out += sep
        return out
    def expected_value(self, feature=None, pstate=None, hstate=None, pstate_prob=None, hstate_prob=None):
        """
        Determine the joint maximum likelihood estimate

        Returns the expected value of ``feature``, conditioning on a fixed
        p-state and/or hidden state if given, otherwise marginalizing over
        the supplied (or steady-state) distributions.

        Parameters
        ----------
        feature : hashable, optional
            Emission feature name; defaults to the first feature.
        pstate, hstate : optional
            Fixed partial / hidden state. Mutually exclusive with the
            corresponding ``*_prob`` argument.
        pstate_prob, hstate_prob : 1d arrays, optional
            Marginalization weights; default to the steady-state probas.
        """
        # Use the first feature by default
        if feature is None:
            feature = self.emission_name[0]
        # Will default to marginal pstate if pstate is unknown or None
        pstate_idx = self.e[pstate]
        if pstate is not None and pstate_prob is not None:
            raise Exception('Must provide either pstate or pstate_proba but not both')
        if hstate is not None and hstate_prob is not None:
            raise Exception('Must provide either hstate or hstate_proba but not both')
        # Marginalize pstate using the steady state probas
        if pstate_prob is None:
            pstate_prob = self.pstate_steadyprob
        # Marginalize hstate using the steady state probas
        if hstate_prob is None:
            hstate_prob = self.steadyprob[pstate_idx]
        if pstate is None and hstate is None:
            # Marginalize both pstate and hstate
            w = (pstate_prob[:, np.newaxis] * hstate_prob).flatten()
            if self.emission_name_distr[feature] == 'lognormal':
                return np.sum(w * expected_lognormal(self.emission[feature]['logsigma'].flatten(),
                                                     self.emission[feature]['logmu'].flatten()))
            elif self.emission_name_distr[feature] == 'normal':
                return np.sum(w * expected_normal(self.emission[feature]['mu'].flatten(),
                                                  self.emission[feature]['sigma'].flatten()))
        elif hstate is None:
            # Marginalize hstate
            if self.emission_name_distr[feature] == 'lognormal':
                return np.sum(hstate_prob * expected_lognormal(self.emission[feature]['logsigma'][pstate_idx, :],
                                                               self.emission[feature]['logmu'][pstate_idx, :]))
            elif self.emission_name_distr[feature] == 'normal':
                return np.sum(hstate_prob * expected_normal(self.emission[feature]['mu'][pstate_idx, :],
                                                            self.emission[feature]['sigma'][pstate_idx, :]))
        elif pstate is None:
            # Marginalize pstate
            if self.emission_name_distr[feature] == 'lognormal':
                return np.sum(pstate_prob * expected_lognormal(self.emission[feature]['logsigma'][:, hstate],
                                                               self.emission[feature]['logmu'][:, hstate]))
            elif self.emission_name_distr[feature] == 'normal':
                return np.sum(pstate_prob * expected_normal(self.emission[feature]['mu'][:, hstate],
                                                            self.emission[feature]['sigma'][:, hstate]))
        else:
            # Both states fixed: no marginalization needed
            if self.emission_name_distr[feature] == 'lognormal':
                return expected_lognormal(self.emission[feature]['logsigma'][pstate_idx, hstate],
                                          self.emission[feature]['logmu'][pstate_idx, hstate])
            elif self.emission_name_distr[feature] == 'normal':
                return expected_normal(self.emission[feature]['mu'][pstate_idx, hstate],
                                       self.emission[feature]['sigma'][pstate_idx, hstate])
        # Falls through (returns None) for unrecognized distributions
        return
    def pdf_fn(self, feature=None, pstate=None, hstate=None, pstate_prob=None, hstate_prob=None):
        """Return a callable pdf for ``feature`` (mixture over unfixed states).

        Mirrors :meth:`expected_value`: a fixed ``pstate``/``hstate`` conditions
        the density; otherwise that axis is marginalized with ``*_prob``
        weights (defaulting to the steady-state probabilities). The returned
        function accepts a scalar or array-like of evaluation points.
        """
        # Use the first feature by default
        if feature is None:
            feature = self.emission_name[0]
        # Will default to marginal pstate if pstate is unknown or None
        pstate_idx = self.e[pstate]
        if pstate is not None and pstate_prob is not None:
            raise Exception('Must provide either pstate or pstate_proba but not both')
        if hstate is not None and hstate_prob is not None:
            raise Exception('Must provide either hstate or hstate_proba but not both')
        # Marginalize pstate using the steady state probas
        if pstate_prob is None:
            pstate_prob = self.pstate_steadyprob
        # Marginalize hstate using the steady state probas
        if hstate_prob is None:
            hstate_prob = self.steadyprob[pstate_idx]
        # scipy lognorm parameterization: shape=logsigma, loc=0, scale=exp(logmu)
        if pstate is None and hstate is None:
            # Marginalize both pstate and hstate
            w = (pstate_prob[:, np.newaxis] * hstate_prob).flatten()
            if self.emission_name_distr[feature] == 'lognormal':
                pdf = lambda x: stats.lognorm.pdf(x, self.emission[feature]['logsigma'].flatten(), 0,
                                                  np.exp(self.emission[feature]['logmu'].flatten()))
            elif self.emission_name_distr[feature] == 'normal':
                pdf = lambda x: stats.norm.pdf(x, self.emission[feature]['mu'].flatten(),
                                               self.emission[feature]['sigma'].flatten())
        elif hstate is None:
            # Marginalize hstate
            w = hstate_prob
            if self.emission_name_distr[feature] == 'lognormal':
                pdf = lambda x: stats.lognorm.pdf(x, self.emission[feature]['logsigma'][pstate_idx, :], 0,
                                                  np.exp(self.emission[feature]['logmu'][pstate_idx, :]))
            elif self.emission_name_distr[feature] == 'normal':
                pdf = lambda x: stats.norm.pdf(x, self.emission[feature]['mu'][pstate_idx, :],
                                               self.emission[feature]['sigma'][pstate_idx, :])
        elif pstate is None:
            # Marginalize pstate
            w = pstate_prob
            if self.emission_name_distr[feature] == 'lognormal':
                pdf = lambda x: stats.lognorm.pdf(x, self.emission[feature]['logsigma'][:, hstate], 0,
                                                  np.exp(self.emission[feature]['logmu'][:, hstate]))
            elif self.emission_name_distr[feature] == 'normal':
                pdf = lambda x: stats.norm.pdf(x, self.emission[feature]['mu'][:, hstate],
                                               self.emission[feature]['sigma'][:, hstate])
        else:
            # Both states fixed: single component, unit weight
            w = 1
            if self.emission_name_distr[feature] == 'lognormal':
                pdf = lambda x: stats.lognorm.pdf(x, self.emission[feature]['logsigma'][pstate_idx, hstate], 0,
                                                  np.exp(self.emission[feature]['logmu'][pstate_idx, hstate]))
            elif self.emission_name_distr[feature] == 'normal':
                pdf = lambda x: stats.norm.pdf(x, self.emission[feature]['mu'][pstate_idx, hstate],
                                               self.emission[feature]['sigma'][pstate_idx, hstate])
        def fn(x):
            # Weighted mixture density evaluated at scalar or array x
            if np.isscalar(x):
                p = np.sum(w * pdf(x))
            else:
                x = np.array(x)
                p = np.zeros(len(x))
                for i, xi in enumerate(x):
                    p[i] = np.sum(w * pdf(xi))
            return p
        return fn
    def cdf_fn(self, feature=None, pstate=None, hstate=None, pstate_prob=None, hstate_prob=None):
        """Return a callable cdf for ``feature`` (mixture over unfixed states).

        Identical in structure to :meth:`pdf_fn`, substituting the cumulative
        distribution for the density. The returned function accepts a scalar
        or array-like of evaluation points.
        """
        # Use the first feature by default
        if feature is None:
            feature = self.emission_name[0]
        # Will default to marginal pstate if pstate is unknown or None
        pstate_idx = self.e[pstate]
        if pstate is not None and pstate_prob is not None:
            raise Exception('Must provide either pstate or pstate_proba but not both')
        if hstate is not None and hstate_prob is not None:
            raise Exception('Must provide either hstate or hstate_proba but not both')
        # Marginalize pstate using the steady state probas
        if pstate_prob is None:
            pstate_prob = self.pstate_steadyprob
        # Marginalize hstate using the steady state probas
        if hstate_prob is None:
            hstate_prob = self.steadyprob[pstate_idx]
        # scipy lognorm parameterization: shape=logsigma, loc=0, scale=exp(logmu)
        if pstate is None and hstate is None:
            # Marginalize both pstate and hstate
            w = (pstate_prob[:, np.newaxis] * hstate_prob).flatten()
            if self.emission_name_distr[feature] == 'lognormal':
                cdf = lambda x: stats.lognorm.cdf(x, self.emission[feature]['logsigma'].flatten(), 0,
                                                  np.exp(self.emission[feature]['logmu'].flatten()))
            elif self.emission_name_distr[feature] == 'normal':
                cdf = lambda x: stats.norm.cdf(x, self.emission[feature]['mu'].flatten(),
                                               self.emission[feature]['sigma'].flatten())
        elif hstate is None:
            # Marginalize hstate
            w = hstate_prob
            if self.emission_name_distr[feature] == 'lognormal':
                cdf = lambda x: stats.lognorm.cdf(x, self.emission[feature]['logsigma'][pstate_idx, :], 0,
                                                  np.exp(self.emission[feature]['logmu'][pstate_idx, :]))
            elif self.emission_name_distr[feature] == 'normal':
                cdf = lambda x: stats.norm.cdf(x, self.emission[feature]['mu'][pstate_idx, :],
                                               self.emission[feature]['sigma'][pstate_idx, :])
        elif pstate is None:
            # Marginalize pstate
            w = pstate_prob
            if self.emission_name_distr[feature] == 'lognormal':
                cdf = lambda x: stats.lognorm.cdf(x, self.emission[feature]['logsigma'][:, hstate], 0,
                                                  np.exp(self.emission[feature]['logmu'][:, hstate]))
            elif self.emission_name_distr[feature] == 'normal':
                cdf = lambda x: stats.norm.cdf(x, self.emission[feature]['mu'][:, hstate],
                                               self.emission[feature]['sigma'][:, hstate])
        else:
            # Both states fixed: single component, unit weight
            w = 1
            if self.emission_name_distr[feature] == 'lognormal':
                cdf = lambda x: stats.lognorm.cdf(x, self.emission[feature]['logsigma'][pstate_idx, hstate], 0,
                                                  np.exp(self.emission[feature]['logmu'][pstate_idx, hstate]))
            elif self.emission_name_distr[feature] == 'normal':
                cdf = lambda x: stats.norm.cdf(x, self.emission[feature]['mu'][pstate_idx, hstate],
                                               self.emission[feature]['sigma'][pstate_idx, hstate])
        def fn(x):
            # Weighted mixture cdf evaluated at scalar or array x
            if np.isscalar(x):
                p = np.sum(w * cdf(x))
            else:
                x = np.array(x)
                p = np.zeros(len(x))
                for i, xi in enumerate(x):
                    p[i] = np.sum(w * cdf(xi))
            return p
        return fn
def params(self, pstates=None):
if pstates is None:
pstates = [None] + sorted(
set(self.er.values())) # TODO: self.e caches any unknown value, maybe it shouldn't?
params = []
# emission parameters
for hstate, pstate_label in product(range(self.n_hidden_states), pstates):
for feature, distr in zip(self.emission_name, self.emission_distr):
for feature_param in _DISTRIBUTIONS[distr]:
params.append(self.emission[feature][feature_param][self.e[pstate_label], hstate])
# transition parameters, diagonals only assuming 2 state
for hstate, pstate_label in product(range(self.n_hidden_states), pstates):
params.append(self.transmat[self.e[pstate_label], self.e[pstate_label], hstate, hstate])
return np.array(params)
|
vmonaco/pohmm | pohmm/pohmm.py | Pohmm.expected_value | python | def expected_value(self, feature=None, pstate=None, hstate=None, pstate_prob=None, hstate_prob=None):
# Use the first feature by default
if feature is None:
feature = self.emission_name[0]
# Will default to marginal pstate if pstate is unknown or None
pstate_idx = self.e[pstate]
if pstate is not None and pstate_prob is not None:
raise Exception('Must provide either pstate or pstate_proba but not both')
if hstate is not None and hstate_prob is not None:
raise Exception('Must provide either hstate or hstate_proba but not both')
# Marginalize pstate using the steady state probas
if pstate_prob is None:
pstate_prob = self.pstate_steadyprob
# Marginalize hstate using the steady state probas
if hstate_prob is None:
hstate_prob = self.steadyprob[pstate_idx]
if pstate is None and hstate is None:
# Marginalize both pstate and hstate
w = (pstate_prob[:, np.newaxis] * hstate_prob).flatten()
if self.emission_name_distr[feature] == 'lognormal':
return np.sum(w * expected_lognormal(self.emission[feature]['logsigma'].flatten(),
self.emission[feature]['logmu'].flatten()))
elif self.emission_name_distr[feature] == 'normal':
return np.sum(w * expected_normal(self.emission[feature]['mu'].flatten(),
self.emission[feature]['sigma'].flatten()))
elif hstate is None:
# Marginalize hstate
if self.emission_name_distr[feature] == 'lognormal':
return np.sum(hstate_prob * expected_lognormal(self.emission[feature]['logsigma'][pstate_idx, :],
self.emission[feature]['logmu'][pstate_idx, :]))
elif self.emission_name_distr[feature] == 'normal':
return np.sum(hstate_prob * expected_normal(self.emission[feature]['mu'][pstate_idx, :],
self.emission[feature]['sigma'][pstate_idx, :]))
elif pstate is None:
# Marginalize pstate
if self.emission_name_distr[feature] == 'lognormal':
return np.sum(pstate_prob * expected_lognormal(self.emission[feature]['logsigma'][:, hstate],
self.emission[feature]['logmu'][:, hstate]))
elif self.emission_name_distr[feature] == 'normal':
return np.sum(pstate_prob * expected_normal(self.emission[feature]['mu'][:, hstate],
self.emission[feature]['sigma'][:, hstate]))
else:
if self.emission_name_distr[feature] == 'lognormal':
return expected_lognormal(self.emission[feature]['logsigma'][pstate_idx, hstate],
self.emission[feature]['logmu'][pstate_idx, hstate])
elif self.emission_name_distr[feature] == 'normal':
return expected_normal(self.emission[feature]['mu'][pstate_idx, hstate],
self.emission[feature]['sigma'][pstate_idx, hstate])
return | Determine the joint maximum likelihood estimate | train | https://github.com/vmonaco/pohmm/blob/c00f8a62d3005a171d424549a55d46c421859ae9/pohmm/pohmm.py#L989-L1049 | [
"def expected_lognormal(logsigma, logmu):\n return np.exp(logmu + (logsigma ** 2) / 2)\n",
"def expected_normal(logmu, logsigma):\n return logmu\n"
] | class Pohmm(object):
"""
Partially observable hidden Markov model
"""
def __init__(self,
n_hidden_states=2,
emissions=['normal'],
max_iter=1000,
thresh=1e-6,
init_method='obs',
init_spread=2,
smoothing=None,
random_state=None):
if type(n_hidden_states) is int:
self.n_hidden_states = n_hidden_states
else:
raise Exception('Wrong type for n_hidden_states. Must be int')
if type(emissions[0]) is tuple:
emission_name, emission_distr = zip(*emissions)
elif type(emissions[0]) is str:
emission_name, emission_distr = np.arange(len(emissions)), emissions
for distr in emission_distr:
if distr not in _DISTRIBUTIONS.keys():
raise ValueError('Emission distribution must be one of', _DISTRIBUTIONS.keys())
self.emission_name = emission_name
self.emission_distr = emission_distr
self.emission_name_distr = dict(zip(emission_name, emission_distr))
self.n_features = len(emissions)
# Set up the emission parameters
# emission: {'feature':{'param': np.array(shape=(n_partial_states, n_hidden_states))}}
self.emission = defaultdict(dict)
for name, distr in zip(self.emission_name, self.emission_distr):
for param in _DISTRIBUTIONS[distr]:
self.emission[name][param] = None
self.emission = dict(self.emission)
assert max_iter >= 0
assert thresh >= 0
self.max_iter = max_iter
self.thresh = thresh
assert init_spread >= 0
self.init_spread = init_spread
if init_method not in _INIT_METHODS:
raise ValueError('init_method must be one of', _INIT_METHODS)
self.init_method = init_method
if smoothing is None:
smoothing = {'transmat': None, 'startprob': None}
for name, distr in zip(self.emission_name, self.emission_distr):
for param in _DISTRIBUTIONS[distr]:
smoothing['%s:%s' % (name, param)] = None
elif type(smoothing) is str:
s = smoothing
smoothing = {'transmat': s, 'startprob': s}
for name, distr in zip(self.emission_name, self.emission_distr):
for param in _DISTRIBUTIONS[distr]:
smoothing['%s:%s' % (name, param)] = s
elif type(smoothing) is dict:
assert 'transmat' in smoothing.keys()
assert 'startprob' in smoothing.keys()
for name, distr in zip(self.emission_name, self.emission_distr):
for param in _DISTRIBUTIONS[distr]:
assert param in smoothing.keys() or '%s:%s' % (name, param) in smoothing.keys()
if param in smoothing.keys() and '%s:%s' % (name, param) not in smoothing.keys():
smoothing['%s:%s' % (name, param)] = smoothing[param]
else:
raise Exception('Wrong type for smoothing. Must be None, str, or dict')
self.smoothing = smoothing
self.random_state = random_state
# Number of unique partial states is unknown until fit
self.n_partial_states = None
# Results after fitting the model
self.logprob_ = None
self.n_iter_performed_ = None
self.logprob_delta_ = None
# Mapping between p-states and a unique index
# Defaults to 0 for unknown or missing p-states
self.e = defaultdict(int)
def _get_startprob(self):
return np.exp(self._log_startprob)
def _set_startprob(self, startprob):
if startprob is None:
startprob = np.ones(shape=(self.n_partial_states, self.n_hidden_states)) / self.n_hidden_states
else:
startprob = np.asarray(startprob, dtype=np.float)
# check if there exists a component whose value is exactly zero
# if so, add a small number and re-normalize
if not np.alltrue(startprob):
startprob = normalize(startprob, axis=1)
if len(startprob) != self.n_partial_states:
raise ValueError('startprob must have length n_partial_states')
if not np.allclose(np.sum(startprob, axis=1), 1.0):
raise ValueError('startprob must sum to 1.0')
self._log_startprob = np.log(np.asarray(startprob).copy())
startprob = property(_get_startprob, _set_startprob)
def _get_steadyprob(self):
return np.exp(self._log_steadyprob)
def _set_steadyprob(self, steadyprob):
if steadyprob is None:
steadyprob = np.ones(shape=(self.n_partial_states, self.n_hidden_states)) / self.n_hidden_states
else:
steadyprob = np.asarray(steadyprob, dtype=np.float)
# check if there exists a component whose value is exactly zero
# if so, add a small number and re-normalize
if not np.alltrue(steadyprob):
steadyprob = normalize(steadyprob, axis=1)
if len(steadyprob) != self.n_partial_states:
raise ValueError('steadyprob must have length n_partial_states')
if not np.allclose(np.sum(steadyprob, axis=1), 1.0):
raise ValueError('steadyprob must sum to 1.0')
self._log_steadyprob = np.log(np.asarray(steadyprob).copy())
steadyprob = property(_get_steadyprob, _set_steadyprob)
def _get_transmat(self):
return np.exp(self._log_transmat)
def _set_transmat(self, transmat):
if transmat is None:
transmat = np.ones(shape=(self.n_partial_states, self.n_partial_states, self.n_hidden_states,
self.n_hidden_states)) / self.n_hidden_states
# check if there exists a component whose value is exactly zero
# if so, add a small number and re-normalize
if not np.alltrue(transmat):
transmat = normalize(transmat, axis=3)
if (np.asarray(transmat).shape
!= (self.n_partial_states, self.n_partial_states, self.n_hidden_states, self.n_hidden_states)):
raise ValueError('transmat must have shape '
'(n_partial_states,n_partial_states,n_hidden_states,n_hidden_states)')
if not np.all(np.allclose(np.sum(transmat, axis=3), 1.0)):
raise ValueError('Rows of transmat must sum to 1.0')
self._log_transmat = np.log(np.asarray(transmat).copy())
underflow_idx = np.isnan(self._log_transmat)
self._log_transmat[underflow_idx] = NEGINF
transmat = property(_get_transmat, _set_transmat)
def _compute_log_likelihood(self, obs, pstates_idx):
q = np.zeros(shape=(len(obs), self.n_hidden_states, self.n_features))
for col, (feature_name, feature_distr) in enumerate(zip(self.emission_name, self.emission_distr)):
if feature_distr == 'normal':
mu = self.emission[feature_name]['mu'][pstates_idx]
sigma = self.emission[feature_name]['sigma'][pstates_idx]
for j in range(self.n_hidden_states):
q[:, j, col] = np.log(
np.maximum(MIN_PROBA, stats.norm.pdf(obs[:, col], loc=mu[:, j], scale=sigma[:, j])))
if feature_distr == 'lognormal':
logmu = self.emission[feature_name]['logmu'][pstates_idx]
logsigma = self.emission[feature_name]['logsigma'][pstates_idx]
for j in range(self.n_hidden_states):
q[:, j, col] = np.log(np.maximum(MIN_PROBA,
stats.lognorm.pdf(obs[:, col], logsigma[:, j], loc=0,
scale=np.exp(logmu[:, j]))))
q = q.sum(axis=2)
return q
def _generate_sample_from_state(self, hidden_state, pstates_idx, random_state=None):
sample = np.zeros(self.n_features)
for col, (feature_name, feature_distr) in enumerate(zip(self.emission_name, self.emission_distr)):
if feature_distr == 'normal':
mu = self.emission[feature_name]['mu'][pstates_idx][hidden_state]
sigma = self.emission[feature_name]['sigma'][pstates_idx][hidden_state]
sample[col] = stats.norm.rvs(loc=mu, scale=sigma, random_state=random_state)
if feature_distr == 'lognormal':
logmu = self.emission[feature_name]['logmu'][pstates_idx][hidden_state]
logsigma = self.emission[feature_name]['logsigma'][pstates_idx][hidden_state]
sample[col] = stats.lognorm.rvs(logsigma, loc=0, scale=np.exp(logmu), random_state=random_state)
return sample
def _init_pstates(self, unique_pstates):
# Map events to a unique index. The unknown p-state is at idx 0
self.e = defaultdict(int)
self.e.update(dict(zip(np.sort(unique_pstates), range(1, len(unique_pstates) + 1))))
self.er = {v: k for k, v in self.e.items()} # Reverse lookup
self.er[0] = UNKNOWN_PSTATE
self.n_partial_states = len(self.e.keys()) + 1 # Add one for the unknown state
return
def _init_pstate_freqs(self, pstates_idx):
# Partial state frequencies
self.pstate_freq = Counter([idx for seq in pstates_idx for idx in seq])
self.pstate_trans_freq = Counter([(idx1, idx2) for seq in pstates_idx for idx1, idx2 in zip(seq[:-1], seq[1:])])
# Store freqs for the meta state
self.pstate_freq[0] = len(np.concatenate(pstates_idx))
for seq in pstates_idx:
self.pstate_startprob[seq[0]] += 1
self.pstate_steadyprob += np.bincount(seq, minlength=self.n_partial_states)
for idx1, idx2 in zip(seq[:-1], seq[1:]):
self.pstate_trans_freq[(0, 0)] += 1
self.pstate_trans_freq[(idx1, 0)] += 1
self.pstate_trans_freq[(0, idx2)] += 1
self.pstate_transmat[idx1, idx2] += 1
# TODO: separate probas from freqs
# Normalize to get the probabilities, ignore the meta state at idx 0
self.pstate_startprob[1:] = normalize(self.pstate_startprob[1:])
self.pstate_transmat[1:, 1:] = normalize(self.pstate_transmat[1:, 1:], axis=1)
self.pstate_steadyprob[1:] = normalize(self.pstate_steadyprob[1:])
return
def _init_from_obs(self, obs, pstates_idx):
# Partial state probabilities
self.pstate_startprob = np.zeros(self.n_partial_states)
self.pstate_transmat = np.zeros((self.n_partial_states, self.n_partial_states))
self.pstate_steadyprob = np.zeros(self.n_partial_states)
# obs should be (N*T, n_features)
# N is the number of samples
# T is the size of each sample
obs = np.concatenate(obs)
pstates_idx = np.concatenate(pstates_idx)
# Initialize starting and transition probas
self.startprob = np.ones(shape=(self.n_partial_states, self.n_hidden_states)) / self.n_hidden_states
self.steadyprob = np.ones(shape=(self.n_partial_states, self.n_hidden_states)) / self.n_hidden_states
self.transmat = np.ones(shape=(self.n_partial_states, self.n_partial_states, self.n_hidden_states,
self.n_hidden_states)) / self.n_hidden_states
# Initialize emission parameters
# Hidden states are ordered by the first feature
feature1 = self.emission_name[0]
for col, (feature_name, feature_distr) in enumerate(zip(self.emission_name, self.emission_distr)):
if feature_distr == 'normal':
self.emission[feature_name]['mu'] = np.zeros(shape=(self.n_partial_states, self.n_hidden_states))
self.emission[feature_name]['sigma'] = np.zeros(shape=(self.n_partial_states, self.n_hidden_states))
for idx in range(1, self.n_partial_states):
idx_pstate = (pstates_idx == idx)
if not np.any(idx_pstate):
idx_pstate = np.arange(len(pstates_idx))
if feature_name == feature1:
self.emission[feature_name]['sigma'][idx, :] = np.maximum(obs[idx_pstate, col].std(), MIN_PROBA)
self.emission[feature_name]['mu'][idx] = obs[idx_pstate, col].mean() + \
obs[:, col].std() * np.linspace(
-self.init_spread, self.init_spread,
self.n_hidden_states)
else:
self.emission[feature_name]['sigma'][idx, :] = np.maximum(obs[idx_pstate, col].std(), MIN_PROBA)
self.emission[feature_name]['mu'][idx, :] = obs[idx_pstate, col].mean()
if feature_distr == 'lognormal':
self.emission[feature_name]['logmu'] = np.zeros(shape=(self.n_partial_states, self.n_hidden_states))
self.emission[feature_name]['logsigma'] = np.zeros(shape=(self.n_partial_states, self.n_hidden_states))
for idx in range(1, self.n_partial_states):
idx_pstate = (pstates_idx == idx)
if not np.any(idx_pstate):
idx_pstate = np.arange(len(pstates_idx))
if feature_name == feature1:
self.emission[feature_name]['logsigma'][idx, :] = np.maximum(np.log(obs[idx_pstate, col]).std(),
MIN_PROBA)
self.emission[feature_name]['logmu'][idx] = np.log(obs[idx_pstate, col]).mean() + np.log(
obs[:, col]).std() * np.linspace(-self.init_spread, self.init_spread,
self.n_hidden_states)
else:
self.emission[feature_name]['logsigma'][idx, :] = np.maximum(np.log(obs[idx_pstate, col]).std(),
MIN_PROBA)
self.emission[feature_name]['logmu'][idx] = np.log(obs[idx_pstate, col]).mean()
return
def _init_random(self, random_state=None):
if random_state is None:
random_state = self.random_state
random_state = check_random_state(random_state)
self.pstate_startprob = np.zeros(self.n_partial_states)
self.pstate_transmat = np.zeros((self.n_partial_states, self.n_partial_states))
self.pstate_steadyprob = np.zeros(self.n_partial_states)
self.pstate_startprob[1:] = gen_stochastic_matrix(size=self.n_partial_states - 1, random_state=random_state)
self.pstate_transmat[1:, 1:] = gen_stochastic_matrix(
size=(self.n_partial_states - 1, self.n_partial_states - 1), random_state=random_state)
self.pstate_steadyprob[1:] = steadystate(self.pstate_transmat[1:, 1:])
self.startprob = gen_stochastic_matrix(size=(self.n_partial_states, self.n_hidden_states),
random_state=random_state)
transmat = np.zeros((self.n_partial_states, self.n_partial_states, self.n_hidden_states, self.n_hidden_states))
for i, j in product(range(1, self.n_partial_states), range(1, self.n_partial_states)):
transmat[i, j] = gen_stochastic_matrix(size=(self.n_hidden_states, self.n_hidden_states),
random_state=random_state)
self.transmat = normalize(transmat, axis=3)
# Initialize emission parameters
for feature_name, feature_distr in zip(self.emission_name, self.emission_distr):
if feature_distr == 'normal':
self.emission[feature_name]['mu'] = random_state.uniform(*_RANDINIT[feature_distr]['mu'], size=(
self.n_partial_states, self.n_hidden_states))
self.emission[feature_name]['sigma'] = random_state.uniform(*_RANDINIT[feature_distr]['sigma'], size=(
self.n_partial_states, self.n_hidden_states))
if feature_distr == 'lognormal':
self.emission[feature_name]['logmu'] = random_state.uniform(*_RANDINIT[feature_distr]['logmu'], size=(
self.n_partial_states, self.n_hidden_states))
self.emission[feature_name]['logsigma'] = random_state.uniform(*_RANDINIT[feature_distr]['logsigma'],
size=(self.n_partial_states,
self.n_hidden_states))
if self.emission_distr[0] == 'normal':
self.emission[self.emission_name[0]]['mu'] = np.sort(self.emission[self.emission_name[0]]['mu'], axis=1)
elif self.emission_distr[0] == 'lognormal':
self.emission[self.emission_name[0]]['logmu'] = np.sort(self.emission[self.emission_name[0]]['logmu'],
axis=1)
return
def _smooth(self):
self._compute_marginals()
startprob = self.startprob
for j in range(1, self.n_partial_states):
if 'freq' == self.smoothing['startprob']:
w_ = 1 / (1 + self.pstate_freq[j])
w_j = 1 - w_
elif 'proba' == self.smoothing['startprob']:
w_j = self.pstate_steadyprob[j]
w_ = 1 - w_j
elif 'exp' == self.smoothing['startprob']:
w_ = np.exp(-self.pstate_freq[j])
w_j = 1 - w_
elif 'fixed' == self.smoothing['startprob']:
w_ = 1
w_j = 0
elif self.smoothing['startprob'] is None:
w_ = 0
w_j = 1
else:
raise Exception('Wrong smoothing for startprob: ' + self.smoothing['startprob'])
startprob[j] = w_j * self.startprob[j] + w_ * self.startprob[0]
self.startprob = startprob
transmat = self.transmat
for i, j in product(range(1, self.n_partial_states), range(1, self.n_partial_states)):
if 'freq' == self.smoothing['transmat']:
w_i0 = 1 / (1 + self.pstate_trans_freq[i, j] + self.pstate_trans_freq[0, j])
w_0j = 1 / (1 + self.pstate_trans_freq[i, j] + self.pstate_trans_freq[i, 0])
w_ij = 1 - (w_i0 + w_0j)
w_ = 0
elif 'proba' == self.smoothing['transmat']:
denom = self.pstate_transmat[i, j] + self.pstate_transmat[i, :].sum() + self.pstate_transmat[:, j].sum()
w_i0 = self.pstate_transmat[i, :].sum() / denom
w_0j = self.pstate_transmat[:, j].sum() / denom
w_ij = self.pstate_transmat[i, j] / denom
w_ = 0
elif 'exp' == self.smoothing['transmat']:
w_i0 = self.pstate_trans_freq[i, 0] * np.exp(-self.pstate_trans_freq[i, j])
w_0j = self.pstate_trans_freq[0, j] * np.exp(-self.pstate_trans_freq[i, j])
w_ = self.pstate_trans_freq[0, 0] * np.exp(
-(self.pstate_trans_freq[i, 0] + self.pstate_trans_freq[0, j]))
w_ij = self.pstate_trans_freq[i, j]
w_ij, w_i0, w_0j, w_ = normalize(np.array([w_ij, w_i0, w_0j, w_]))
elif 'fixed' == self.smoothing['transmat']:
w_i0 = 0
w_0j = 0
w_ = 1
w_ij = 0
elif self.smoothing['transmat'] is None:
w_i0 = 0
w_0j = 0
w_ = 0
w_ij = 1
else:
raise Exception('Wrong smoothing for transmat: ' + self.smoothing['transmat'])
assert (w_i0 + w_0j + w_ + w_ij) - 1 < TOLERANCE
transmat[i, j] = w_ij * self.transmat[i, j] + w_i0 * self.transmat[i, 0] + w_0j * self.transmat[0, j] + w_ * \
self.transmat[
0, 0]
self.transmat = transmat
assert np.all(self.startprob.sum(axis=1) - 1 < TOLERANCE)
assert np.all(self.steadyprob.sum(axis=1) - 1 < TOLERANCE)
assert np.all(self.transmat.sum(axis=3) - 1 < TOLERANCE)
for feature_name, feature_distr in zip(self.emission_name, self.emission_distr):
for param in _DISTRIBUTIONS[feature_distr]:
key = '%s:%s' % (feature_name, param)
for j in range(1, self.n_partial_states):
if 'freq' == self.smoothing[key]:
w_ = 1 / (1 + self.pstate_freq[j])
w_j = 1 - w_
elif 'proba' == self.smoothing[key]:
w_j = self.pstate_steadyprob[j]
w_ = 1 - w_j
elif 'exp' == self.smoothing[key]:
w_ = np.exp(-self.pstate_freq[j])
w_j = 1 - w_
elif 'fixed' == self.smoothing[key]:
w_ = 1
w_j = 0
elif self.smoothing[key] is None:
w_ = 0
w_j = 1
else:
raise Exception('Wrong smoothing for ' + key)
self.emission[feature_name][param][j] = w_j * self.emission[feature_name][param][j] + w_ * \
self.emission[
feature_name][
param][0]
return
def _initialize_sufficient_statistics(self):
stats = {
'nobs': 0,
'post': np.zeros((self.n_partial_states, self.n_hidden_states)),
'obs': np.zeros((self.n_partial_states, self.n_hidden_states, self.n_features)),
'obs**2': np.zeros((self.n_partial_states, self.n_hidden_states, self.n_features)),
'lnobs': np.zeros((self.n_partial_states, self.n_hidden_states, self.n_features)),
'lnobs**2': np.zeros((self.n_partial_states, self.n_hidden_states, self.n_features)),
'start': np.zeros((self.n_partial_states, self.n_hidden_states)),
'steady': np.zeros((self.n_partial_states, self.n_hidden_states)),
'trans': np.zeros(
(self.n_partial_states, self.n_partial_states, self.n_hidden_states, self.n_hidden_states))
}
return stats
def _accumulate_sufficient_statistics(self, stats, obs, pstates_idx, framelogprob,
posteriors, fwdlattice, bwdlattice):
stats['nobs'] += 1
n_observations, n_hidden_states = framelogprob.shape
stats['start'][0] += posteriors[0]
for i in range(self.n_partial_states):
if len(np.where(pstates_idx == i)[0]) > 0:
stats['start'][i] += posteriors[np.where(pstates_idx == i)[0].min()]
if n_observations > 1:
lneta = np.zeros((n_observations - 1, n_hidden_states, n_hidden_states))
_hmmc._compute_lneta(n_observations, n_hidden_states, pstates_idx, fwdlattice,
self._log_transmat, bwdlattice, framelogprob,
lneta)
for i, j in unique_rows(np.c_[pstates_idx[:-1], pstates_idx[1:]]):
if ((pstates_idx[:-1] == i) & (pstates_idx[1:] == j)).sum() > 0:
stats['trans'][i, j] += np.exp(
logsumexp(lneta[(pstates_idx[:-1] == i) & (pstates_idx[1:] == j)], axis=0))
for i in range(self.n_partial_states):
stats['post'][i] += posteriors[pstates_idx == i].sum(axis=0)
stats['obs'][i] += np.dot(posteriors[pstates_idx == i].T, obs[pstates_idx == i])
stats['obs**2'][i] += np.dot(posteriors[pstates_idx == i].T, obs[pstates_idx == i] ** 2)
stats['lnobs'][i] += np.dot(posteriors[pstates_idx == i].T, np.log(obs[pstates_idx == i]))
stats['lnobs**2'][i] += np.dot(posteriors[pstates_idx == i].T, np.log(obs[pstates_idx == i]) ** 2)
return
def _compute_marginals(self):
# TODO: cythonize some of this
# Start prob, weighted by p-state start probs
self.startprob[0] = (self.pstate_startprob[1:, np.newaxis] * self.startprob[1:]).sum(axis=0)
# Use the p-state transmat and transmat to get the full transmat
full_transmat = ph2full(self.pstate_transmat[1:, 1:], self.transmat[1:, 1:])
full_steadyprob = steadystate(full_transmat)
# Steady state probas are determined by the full trans mat, need to be updated
steadyprob = np.zeros(shape=(self.n_partial_states, self.n_hidden_states))
steadyprob[0] = full_steadyprob.reshape(-1, self.n_hidden_states).sum(axis=0)
for i in range(self.n_partial_states - 1):
steadyprob[i + 1] = normalize(
full_steadyprob[i * self.n_hidden_states:i * self.n_hidden_states + self.n_hidden_states])
self.steadyprob = steadyprob
# Update the transations to/from the marginal state
transmat = self.transmat
# Group the hidden states within each partial state
for hidx1, hidx2 in product(range(self.n_hidden_states), range(self.n_hidden_states)):
transmat[0, 0][hidx1, hidx2] = full_transmat[hidx1::self.n_hidden_states, hidx2::self.n_hidden_states].sum()
for pidx in range(self.n_partial_states - 1):
transmat[pidx + 1, 0][hidx1, hidx2] = full_transmat[pidx * self.n_hidden_states + hidx1,
hidx2::self.n_hidden_states].sum()
transmat[0, pidx + 1][hidx1, hidx2] = full_transmat[hidx1::self.n_hidden_states,
pidx * self.n_hidden_states + hidx2].sum()
self.transmat = normalize(transmat, axis=3)
pweights = self.pstate_steadyprob[1:, np.newaxis]
# Update emission parameters
for feature_name, feature_distr in zip(self.emission_name, self.emission_distr):
if feature_distr == 'normal':
# Marginal state is a mixture of normals
mu = self.emission[feature_name]['mu'][1:, :]
sigma = self.emission[feature_name]['sigma'][1:, :]
# Weighted mean and var
mu_0 = (pweights * mu).sum(axis=0)
self.emission[feature_name]['mu'][0, :] = mu_0
self.emission[feature_name]['sigma'][0, :] = np.sqrt(
(pweights * ((mu - mu_0) ** 2 + sigma ** 2)).sum(axis=0))
if feature_distr == 'lognormal':
# Marginal state is a mixture of normals
mu = self.emission[feature_name]['logmu'][1:, :]
sigma = self.emission[feature_name]['logsigma'][1:, :]
# Weighted mean and var
mu_0 = (pweights * mu).sum(axis=0)
self.emission[feature_name]['logmu'][0, :] = mu_0
self.emission[feature_name]['logsigma'][0, :] = np.sqrt(
(pweights * ((mu - mu_0) ** 2 + sigma ** 2)).sum(axis=0))
return
def _do_mstep(self, stats):
self.startprob = normalize(np.maximum(stats['start'], MIN_PROBA), axis=1)
self.transmat = normalize(np.maximum(stats['trans'], MIN_PROBA), axis=3)
for col, (feature_name, feature_distr) in enumerate(zip(self.emission_name, self.emission_distr)):
if feature_distr == 'normal':
denom = np.maximum(stats['post'], MIN_PROBA)
mu = stats['obs'][:, :, col] / denom
cv_num = (stats['obs**2'][:, :, col]
- 2 * mu * stats['obs'][:, :, col]
+ mu ** 2 * denom)
sigma = np.sqrt(cv_num / denom)
sigma[np.isnan(sigma)] = MIN_PROBA
self.emission[feature_name]['mu'] = mu
self.emission[feature_name]['sigma'] = np.maximum(sigma, MIN_PROBA)
if feature_distr == 'lognormal':
denom = np.maximum(stats['post'], MIN_PROBA)
mu = stats['lnobs'][:, :, col] / denom
cv_num = (stats['lnobs**2'][:, :, col]
- 2 * mu * stats['lnobs'][:, :, col]
+ mu ** 2 * denom)
sigma = np.sqrt(cv_num / denom)
sigma[np.isnan(sigma)] = MIN_PROBA
self.emission[feature_name]['logmu'] = mu
self.emission[feature_name]['logsigma'] = np.maximum(sigma, MIN_PROBA)
return
def _do_forward_pass(self, framelogprob, event_idx):
n_observations, n_hidden_states = framelogprob.shape
fwdlattice = np.zeros((n_observations, n_hidden_states))
_hmmc._forward(n_observations, n_hidden_states,
event_idx, self._log_startprob,
self._log_transmat, framelogprob, fwdlattice)
return logsumexp(fwdlattice[-1]), fwdlattice
def _do_backward_pass(self, framelogprob, event_idx):
n_observations, n_hidden_states = framelogprob.shape
bwdlattice = np.zeros((n_observations, n_hidden_states))
_hmmc._backward(n_observations, n_hidden_states,
event_idx, self._log_startprob,
self._log_transmat, framelogprob, bwdlattice)
return bwdlattice
def _do_viterbi_pass(self, framelogprob, event_idx):
n_observations, n_components = framelogprob.shape
state_sequence, logprob = _hmmc._viterbi(
n_observations, n_components,
event_idx, self._log_startprob,
self._log_transmat, framelogprob)
return logprob, state_sequence
def rand(self, unique_pstates, random_state=None):
"""
Randomize the POHMM parameters
"""
self._init_pstates(unique_pstates)
self._init_random(random_state=random_state)
self._compute_marginals()
return self
def fit(self, obs, pstates, unique_pstates=None):
"""
Estimate model parameters.
"""
obs = [np.array(o) for o in obs]
pstates = [np.array(p) for p in pstates]
# List or array of observation sequences
assert len(obs) == len(pstates)
assert obs[0].ndim == 2
assert pstates[0].ndim == 1
if unique_pstates is not None:
self._init_pstates(unique_pstates)
else:
self._init_pstates(list(set(np.concatenate(pstates))))
# Map the partial states to a unique index
pstates_idx = [np.array([self.e[p] for p in seq]) for seq in pstates]
if self.init_method == 'rand':
self._init_random()
elif self.init_method == 'obs':
self._init_from_obs(obs, pstates_idx)
self._init_pstate_freqs(pstates_idx)
self._smooth()
logprob = []
for i in range(self.max_iter):
# Expectation step
stats = self._initialize_sufficient_statistics()
curr_logprob = 0
for obs_i, pstates_idx_i in zip(obs, pstates_idx):
framelogprob = self._compute_log_likelihood(obs_i, pstates_idx_i)
lpr, fwdlattice = self._do_forward_pass(framelogprob, pstates_idx_i)
bwdlattice = self._do_backward_pass(framelogprob, pstates_idx_i)
gamma = fwdlattice + bwdlattice
posteriors = np.exp(gamma.T - logsumexp(gamma, axis=1)).T
curr_logprob += lpr
self._accumulate_sufficient_statistics(stats, obs_i,
pstates_idx_i, framelogprob, posteriors, fwdlattice, bwdlattice)
logprob.append(curr_logprob)
self.logprob_ = curr_logprob
# Check for convergence.
self.n_iter_performed_ = i
if i > 0:
self.logprob_delta = logprob[-1] - logprob[-2]
if self.logprob_delta < self.thresh:
break
# Maximization step
self._do_mstep(stats)
# Mix the parameters
self._smooth()
return self
def score(self, obs, pstates):
"""
Compute the log probability under the model.
"""
pstates_idx = np.array([self.e[p] for p in pstates])
framelogprob = self._compute_log_likelihood(obs, pstates_idx)
logprob, _ = self._do_forward_pass(framelogprob, pstates_idx)
return logprob
def score_events(self, obs, pstates):
"""
Compute the log probability of each event under the model.
"""
pstates_idx = np.array([self.e[p] for p in pstates])
framelogprob = self._compute_log_likelihood(obs, pstates_idx)
_, fwdlattice = self._do_forward_pass(framelogprob, pstates_idx)
L = logsumexp(fwdlattice, axis=1)
return np.concatenate([L[[0]], np.diff(L)])
def predict_states(self, obs, pstates):
pstates_idx = np.array([self.e[p] for p in pstates])
framelogprob = self._compute_log_likelihood(obs, pstates_idx)
viterbi_logprob, state_sequence = self._do_viterbi_pass(framelogprob, pstates_idx)
return viterbi_logprob, state_sequence
def predict(self, obs, pstates, next_pstate=None):
"""
Predict the next observation
"""
assert len(obs) == len(pstates)
pstates_idx = np.array([self.e[ei] for ei in pstates])
next_pstate_idx = self.e[next_pstate]
if len(obs) == 0:
# No history, use the starting probas
next_hstate_prob = self.startprob[next_pstate_idx]
else:
# With a history, determine the hidden state posteriors using
# the last posteriors and transition matrix
framelogprob = self._compute_log_likelihood(obs, pstates_idx)
_, fwdlattice = self._do_forward_pass(framelogprob, pstates_idx)
next_hstate_prob = np.zeros(self.n_hidden_states)
alpha_n = fwdlattice[-1]
vmax = alpha_n.max(axis=0)
alpha_n = np.exp(alpha_n - vmax)
alpha_n = alpha_n / alpha_n.sum()
trans = self.transmat[pstates_idx[-1], next_pstate_idx]
for i in range(self.n_hidden_states):
next_hstate_prob[i] = np.sum([alpha_n[j] * trans[j, i] for j in range(self.n_hidden_states)])
assert next_hstate_prob.sum() - 1 < TOLERANCE
# Make the prediction
prediction = np.array(
[self.expected_value(feature, pstate=next_pstate, hstate_prob=next_hstate_prob) for feature in
self.emission_name])
# next_hstate = np.argmax(next_hstate_prob)
# prediction = np.array(
# [self.expected_value(feature, pstate=next_pstate, hstate=next_hstate) for feature in
# self.emission_name])
return prediction
def gen_pstates_idx(self, n, random_state=None):
if random_state is None:
random_state = self.random_state
random_state = check_random_state(random_state)
startprob_cdf = np.cumsum(self.pstate_startprob)
transmat_cdf = np.cumsum(self.pstate_transmat, 1)
# Initial state.
rand = random_state.rand()
curr_pstate = (startprob_cdf > rand).argmax()
pstates = [curr_pstate]
for _ in range(1, n):
rand = random_state.rand()
curr_pstate = (transmat_cdf[curr_pstate] > rand).argmax()
pstates.append(curr_pstate)
return np.array(pstates, dtype=int)
def sample(self, pstates=None, n_obs=None, random_state=None):
"""
"""
random_state = check_random_state(random_state)
if pstates is None and n_obs is None:
raise Exception('Must provide either pstates or n_obs')
if pstates is not None and n_obs is not None:
raise Exception('Must provide either pstates or n_obs but not both')
gen_pstates = False
rand = random_state.rand()
if pstates is None:
gen_pstates = True
pstartprob_cdf = np.cumsum(self.pstate_startprob)
ptransmat_cdf = np.cumsum(self.pstate_transmat, 1)
# Initial pstate
currpstate = (pstartprob_cdf > rand).argmax()
pstates_idx = [currpstate]
pstates = [self.er[currpstate]]
else:
n_obs = len(pstates)
pstates_idx = np.array([self.e[p] for p in pstates])
startprob_pdf = self.startprob[pstates_idx[0]]
startprob_cdf = np.cumsum(startprob_pdf)
transmat_cdf = np.cumsum(self.transmat[0, pstates_idx[0]], 1)
# Initial hidden state
rand = random_state.rand()
currstate = (startprob_cdf > rand).argmax()
hidden_states = [currstate]
obs = [self._generate_sample_from_state(currstate, pstates_idx[0], random_state)]
for i in range(1, n_obs):
rand = random_state.rand()
if gen_pstates:
currpstate = (ptransmat_cdf[currpstate] > rand).argmax()
pstates_idx.append(currpstate)
pstates.append(self.er[currpstate])
transmat_cdf = np.cumsum(self.transmat[pstates_idx[i - 1], pstates_idx[i]], 1)
rand = random_state.rand()
currstate = (transmat_cdf[currstate] > rand).argmax()
hidden_states.append(currstate)
obs.append(self._generate_sample_from_state(currstate, pstates_idx[i], random_state))
return np.array(obs), np.array(pstates), np.array(hidden_states, dtype=int)
def fit_df(self, dfs, pstate_col=PSTATE_COL):
"""
Convenience function to fit a model from a list of dataframes
"""
obs_cols = list(self.emission_name)
obs = [df[df.columns.difference([pstate_col])][obs_cols].values for df in dfs]
pstates = [df[pstate_col].values for df in dfs]
return self.fit(obs, pstates)
def score_df(self, df, pstate_col=PSTATE_COL):
"""
"""
obs_cols = list(self.emission_name)
obs = df[df.columns.difference([pstate_col])][obs_cols].values
pstates = df[pstate_col].values
return self.score(obs, pstates)
def score_events_df(self, df, pstate_col=PSTATE_COL, score_col='score'):
"""
"""
df = df.copy()
obs_cols = list(self.emission_name)
obs = df[df.columns.difference([pstate_col])][obs_cols].values
pstates = df[pstate_col].values
df[score_col] = self.score_events(obs, pstates)
return df
def predict_states_df(self, df, pstate_col=PSTATE_COL, hstate_col=HSTATE_COL):
df = df.copy()
obs_cols = list(self.emission_name)
obs = df[df.columns.difference([pstate_col])][obs_cols].values
pstates = df[pstate_col].values
_, df[hstate_col] = self.predict_states(obs, pstates)
return df
def predict_df(self, df, next_pstate=None, pstate_col=PSTATE_COL):
obs_cols = list(self.emission_name)
obs = df[df.columns.difference([pstate_col])][obs_cols].values
pstates = df[pstate_col].values
return self.predict(obs, pstates, next_pstate)
def sample_df(self, pstates=None, n_obs=None, random_state=None, pstate_col=PSTATE_COL, hstate_col=HSTATE_COL):
"""
Convenience function to generate samples a model and create a dataframe
"""
try:
import pandas as pd
except Exception as e:
raise e
obs, pstates, hstates = self.sample(pstates, n_obs, random_state)
items = []
if pstate_col is not None:
items.append((pstate_col, pstates))
if hstate_col is not None:
items.append((hstate_col, hstates))
items = items + [(self.emission_name[i], obs[:, i]) for i in range(self.n_features)]
df = pd.DataFrame.from_items(items)
return df
def __str__(self):
pstates = sorted(self.e.keys())
sep = '-' * 80 + '\n'
sep2 = '_' * 40 + '\n'
out = 'POHMM\n'
out += 'H-states: %d\n' % self.n_hidden_states
out += 'P-states: (%d) %s\n' % (len(pstates), str(pstates))
out += 'Emission: %s\n' % (self.emission_distr)
out += sep
out += 'Transition probas\n'
out += sep2
out += '. -> .\n%s\n' % str(self.transmat[0, 0])
for pstate in pstates:
out += sep2
out += '%s -> .\n%s\n' % (pstate, str(self.transmat[self.e[pstate], 0]))
out += sep2
out += '. -> %s\n%s\n' % (pstate, str(self.transmat[0, self.e[pstate]]))
for pstate1, pstate2 in product(pstates, pstates):
out += sep2
out += '%s -> %s\n%s\n' % (pstate1, pstate2, str(self.transmat[self.e[pstate1], self.e[pstate2]]))
out += sep
out += 'Starting probas\n'
out += '.: %s\n' % str(self.startprob[0])
for pstate in pstates:
out += '%s: %s\n' % (pstate, str(self.startprob[self.e[pstate]]))
out += sep
out += 'Steady probas\n'
out += '.: %s\n' % str(self.steadyprob[0])
for pstate in pstates:
out += '%s: %s\n' % (pstate, str(self.steadyprob[self.e[pstate]]))
out += sep
out += 'Emissions\n'
for feature_name, feature_distr in zip(self.emission_name, self.emission_distr):
out += sep2
out += 'Feature %s: %s\n' % (feature_name, feature_distr)
for param in _DISTRIBUTIONS[feature_distr]:
out += '.: %s = %s\n' % (param, str(self.emission[feature_name][param][0]))
for pstate in pstates:
out += '%s: %s = %s\n' % (pstate, param, str(self.emission[feature_name][param][self.e[pstate]]))
out += sep
return out
def pdf_fn(self, feature=None, pstate=None, hstate=None, pstate_prob=None, hstate_prob=None):
# Use the first feature by default
if feature is None:
feature = self.emission_name[0]
# Will default to marginal pstate if pstate is unknown or None
pstate_idx = self.e[pstate]
if pstate is not None and pstate_prob is not None:
raise Exception('Must provide either pstate or pstate_proba but not both')
if hstate is not None and hstate_prob is not None:
raise Exception('Must provide either hstate or hstate_proba but not both')
# Marginalize pstate using the steady state probas
if pstate_prob is None:
pstate_prob = self.pstate_steadyprob
# Marginalize hstate using the steady state probas
if hstate_prob is None:
hstate_prob = self.steadyprob[pstate_idx]
if pstate is None and hstate is None:
# Marginalize both pstate and hstate
w = (pstate_prob[:, np.newaxis] * hstate_prob).flatten()
if self.emission_name_distr[feature] == 'lognormal':
pdf = lambda x: stats.lognorm.pdf(x, self.emission[feature]['logsigma'].flatten(), 0,
np.exp(self.emission[feature]['logmu'].flatten()))
elif self.emission_name_distr[feature] == 'normal':
pdf = lambda x: stats.norm.pdf(x, self.emission[feature]['mu'].flatten(),
self.emission[feature]['sigma'].flatten())
elif hstate is None:
# Marginalize hstate
w = hstate_prob
if self.emission_name_distr[feature] == 'lognormal':
pdf = lambda x: stats.lognorm.pdf(x, self.emission[feature]['logsigma'][pstate_idx, :], 0,
np.exp(self.emission[feature]['logmu'][pstate_idx, :]))
elif self.emission_name_distr[feature] == 'normal':
pdf = lambda x: stats.norm.pdf(x, self.emission[feature]['mu'][pstate_idx, :],
self.emission[feature]['sigma'][pstate_idx, :])
elif pstate is None:
# Marginalize pstate
w = pstate_prob
if self.emission_name_distr[feature] == 'lognormal':
pdf = lambda x: stats.lognorm.pdf(x, self.emission[feature]['logsigma'][:, hstate], 0,
np.exp(self.emission[feature]['logmu'][:, hstate]))
elif self.emission_name_distr[feature] == 'normal':
pdf = lambda x: stats.norm.pdf(x, self.emission[feature]['mu'][:, hstate],
self.emission[feature]['sigma'][:, hstate])
else:
w = 1
if self.emission_name_distr[feature] == 'lognormal':
pdf = lambda x: stats.lognorm.pdf(x, self.emission[feature]['logsigma'][pstate_idx, hstate], 0,
np.exp(self.emission[feature]['logmu'][pstate_idx, hstate]))
elif self.emission_name_distr[feature] == 'normal':
pdf = lambda x: stats.norm.pdf(x, self.emission[feature]['mu'][pstate_idx, hstate],
self.emission[feature]['sigma'][pstate_idx, hstate])
def fn(x):
if np.isscalar(x):
p = np.sum(w * pdf(x))
else:
x = np.array(x)
p = np.zeros(len(x))
for i, xi in enumerate(x):
p[i] = np.sum(w * pdf(xi))
return p
return fn
def cdf_fn(self, feature=None, pstate=None, hstate=None, pstate_prob=None, hstate_prob=None):
# Use the first feature by default
if feature is None:
feature = self.emission_name[0]
# Will default to marginal pstate if pstate is unknown or None
pstate_idx = self.e[pstate]
if pstate is not None and pstate_prob is not None:
raise Exception('Must provide either pstate or pstate_proba but not both')
if hstate is not None and hstate_prob is not None:
raise Exception('Must provide either hstate or hstate_proba but not both')
# Marginalize pstate using the steady state probas
if pstate_prob is None:
pstate_prob = self.pstate_steadyprob
# Marginalize hstate using the steady state probas
if hstate_prob is None:
hstate_prob = self.steadyprob[pstate_idx]
if pstate is None and hstate is None:
# Marginalize both pstate and hstate
w = (pstate_prob[:, np.newaxis] * hstate_prob).flatten()
if self.emission_name_distr[feature] == 'lognormal':
cdf = lambda x: stats.lognorm.cdf(x, self.emission[feature]['logsigma'].flatten(), 0,
np.exp(self.emission[feature]['logmu'].flatten()))
elif self.emission_name_distr[feature] == 'normal':
cdf = lambda x: stats.norm.cdf(x, self.emission[feature]['mu'].flatten(),
self.emission[feature]['sigma'].flatten())
elif hstate is None:
# Marginalize hstate
w = hstate_prob
if self.emission_name_distr[feature] == 'lognormal':
cdf = lambda x: stats.lognorm.cdf(x, self.emission[feature]['logsigma'][pstate_idx, :], 0,
np.exp(self.emission[feature]['logmu'][pstate_idx, :]))
elif self.emission_name_distr[feature] == 'normal':
cdf = lambda x: stats.norm.cdf(x, self.emission[feature]['mu'][pstate_idx, :],
self.emission[feature]['sigma'][pstate_idx, :])
elif pstate is None:
# Marginalize pstate
w = pstate_prob
if self.emission_name_distr[feature] == 'lognormal':
cdf = lambda x: stats.lognorm.cdf(x, self.emission[feature]['logsigma'][:, hstate], 0,
np.exp(self.emission[feature]['logmu'][:, hstate]))
elif self.emission_name_distr[feature] == 'normal':
cdf = lambda x: stats.norm.cdf(x, self.emission[feature]['mu'][:, hstate],
self.emission[feature]['sigma'][:, hstate])
else:
w = 1
if self.emission_name_distr[feature] == 'lognormal':
cdf = lambda x: stats.lognorm.cdf(x, self.emission[feature]['logsigma'][pstate_idx, hstate], 0,
np.exp(self.emission[feature]['logmu'][pstate_idx, hstate]))
elif self.emission_name_distr[feature] == 'normal':
cdf = lambda x: stats.norm.cdf(x, self.emission[feature]['mu'][pstate_idx, hstate],
self.emission[feature]['sigma'][pstate_idx, hstate])
def fn(x):
if np.isscalar(x):
p = np.sum(w * cdf(x))
else:
x = np.array(x)
p = np.zeros(len(x))
for i, xi in enumerate(x):
p[i] = np.sum(w * cdf(xi))
return p
return fn
def params(self, pstates=None):
if pstates is None:
pstates = [None] + sorted(
set(self.er.values())) # TODO: self.e caches any unknown value, maybe it shouldn't?
params = []
# emission parameters
for hstate, pstate_label in product(range(self.n_hidden_states), pstates):
for feature, distr in zip(self.emission_name, self.emission_distr):
for feature_param in _DISTRIBUTIONS[distr]:
params.append(self.emission[feature][feature_param][self.e[pstate_label], hstate])
# transition parameters, diagonals only assuming 2 state
for hstate, pstate_label in product(range(self.n_hidden_states), pstates):
params.append(self.transmat[self.e[pstate_label], self.e[pstate_label], hstate, hstate])
return np.array(params)
|
vmonaco/pohmm | examples/keystroke.py | stratified_kfold | python | def stratified_kfold(df, n_folds):
sessions = pd.DataFrame.from_records(list(df.index.unique())).groupby(0).apply(lambda x: x[1].unique())
sessions.apply(lambda x: np.random.shuffle(x))
folds = []
for i in range(n_folds):
idx = sessions.apply(lambda x: pd.Series(x[i * (len(x) / n_folds):(i + 1) * (len(x) / n_folds)]))
idx = pd.DataFrame(idx.stack().reset_index(level=1, drop=True)).set_index(0, append=True).index.values
folds.append(df.loc[idx])
return folds | Create stratified k-folds from an indexed dataframe | train | https://github.com/vmonaco/pohmm/blob/c00f8a62d3005a171d424549a55d46c421859ae9/examples/keystroke.py#L15-L26 | null | import numpy as np
import pandas as pd
from scipy import interp
from sklearn.metrics import auc
from sklearn.metrics.ranking import _binary_clf_curve
from pohmm import Pohmm, PohmmClassifier
# CMU Keystroke Dynamics Benchmark Dataset
# See: http://www.cs.cmu.edu/~keystroke/
# Kevin S. Killourhy and Roy A. Maxion. "Comparing Anomaly Detectors for Keystroke Dynamics"
DATASET_URL = 'http://www.cs.cmu.edu/~keystroke/DSL-StrongPasswordData.csv'
def user_folds(df, target):
users = df.index.get_level_values(0).unique()
return [df.loc[u].reset_index().set_index([target, 'session']) for u in users]
def preprocess(df):
"""Convert the CMU dataset from row vectors into time/duration row observations"""
def process_row(idx_row):
idx, row = idx_row
# press-press latency
tau = 1000 * row[4::3].astype(float).values
tau = np.r_[np.median(tau), tau]
# duration
duration = 1000 * row[3::3].astype(float).values
keyname = list('.tie5Roanl') + ['enter']
return pd.DataFrame.from_items([
('user', [row['subject']] * 11),
('session', [row['sessionIndex'] * 100 + row['rep']] * 11),
('tau', tau),
('duration', duration),
('event', keyname)
])
df = pd.concat(map(process_row, df.iterrows())).set_index(['user', 'session'])
return df
def roc_curve(y_true, y_score):
fps, tps, thresholds = _binary_clf_curve(
y_true, y_score, pos_label=None, sample_weight=None)
if tps.size == 0 or fps[0] != 0:
# Add an extra threshold position if necessary
tps = np.r_[0, tps]
fps = np.r_[0, fps]
thresholds = np.r_[thresholds[0] + 1e-2, thresholds]
fpr = fps / fps[-1]
tpr = tps / tps[-1]
return fpr, 1 - tpr, thresholds
def ROC(scores):
# Generate an ROC curve for each fold, ordered by increasing threshold
roc = scores.groupby('user').apply(lambda x: pd.DataFrame(np.c_[roc_curve(x['genuine'], x['score'])][::-1],
columns=['far', 'frr', 'threshold']))
# interpolate to get the same threshold values in each fold
thresholds = np.sort(roc['threshold'].unique())
roc = roc.groupby(level='user').apply(lambda x: pd.DataFrame(np.c_[thresholds,
interp(thresholds, x['threshold'], x['far']),
interp(thresholds, x['threshold'], x['frr'])],
columns=['threshold', 'far', 'frr']))
roc = roc.reset_index(level=1, drop=True).reset_index()
return roc
def EER(roc):
far, frr = roc['far'].values, roc['frr'].values
def perp(a):
b = np.empty_like(a)
b[0] = -a[1]
b[1] = a[0]
return b
# line segment a given by endpoints a1, a2
# line segment b given by endpoints b1, b2
def seg_intersect(a1, a2, b1, b2):
da = a2 - a1
db = b2 - b1
dp = a1 - b1
dap = perp(da)
denom = np.dot(dap, db)
num = np.dot(dap, dp)
return (num / denom) * db + b1
d = far <= frr
idx = np.diff(d).nonzero()[0][0]
return seg_intersect(np.array([idx, far[idx]]),
np.array([idx + 1, far[idx + 1]]),
np.array([idx, frr[idx]]),
np.array([idx + 1, frr[idx + 1]]))[1]
def AUC(roc):
return auc(roc['frr'].values, roc['far'].values)
def keystroke_model():
"""Generates a 2-state model with lognormal emissions and frequency smoothing"""
model = Pohmm(n_hidden_states=2,
init_spread=2,
emissions=['lognormal', 'lognormal'],
smoothing='freq',
init_method='obs',
thresh=1)
return model
def identification(df, n_folds=10, seed=1234):
# Obtain identification results using k-fold cross validation
np.random.seed(seed)
folds = stratified_kfold(df, n_folds)
identification_results = []
for i in range(n_folds):
print('Fold %d of %d' % (i + 1, n_folds))
test_idx, test_samples = zip(*folds[i].groupby(level=[0, 1]))
train_idx, train_samples = zip(*pd.concat(folds[:i] + folds[i + 1:]).groupby(level=[0, 1]))
test_labels, _ = zip(*test_idx)
train_labels, _ = zip(*train_idx)
cl = PohmmClassifier(keystroke_model)
cl.fit_df(train_labels, train_samples)
for test_label, test_sample in zip(test_labels, test_samples):
result, _ = cl.predict_df(test_sample)
identification_results.append((i, test_label, result))
identification_results = pd.DataFrame.from_records(identification_results,
columns=['fold', 'label', 'prediction'])
acc_summary = identification_results.groupby('fold').apply(
lambda x: (x['label'] == x['prediction']).sum() / len(x)).describe()
print('Identification summary')
print('ACC: %.3f +/- %.3f' % (acc_summary['mean'], acc_summary['std']))
return
def verification(df):
verification_results = []
users = set(df.index.get_level_values(level='user').unique())
for genuine_user in users:
impostor_users = users.difference([genuine_user])
genuine_samples = df.loc[genuine_user]
_, genuine_samples = zip(*genuine_samples.groupby(level='session'))
train, test = genuine_samples[150:200], genuine_samples[200:]
pohmm = keystroke_model()
pohmm.fit_df(train)
# train_scores = np.array([pohmm.score_df(sample) for sample in train])
scores = []
for sample in test:
score = pohmm.score_df(sample)
scores.append(score)
verification_results.append((genuine_user, True, score))
for imposter_user in impostor_users:
_, impostor_samples = zip(*df.loc[imposter_user].groupby(level='session'))
for sample in impostor_samples[:5]:
score = pohmm.score_df(sample)
scores.append(score)
verification_results.append((genuine_user, False, score))
verification_results = pd.DataFrame.from_records(verification_results,
columns=['user', 'genuine', 'score'])
verification_ROC = verification_results.groupby('user').apply(ROC)
eer_summary = verification_ROC.groupby('user').apply(EER).describe()
auc_summary = verification_ROC.groupby('user').apply(AUC).describe()
print('Verification summary')
print('EER: %.3f +/- %.3f' % (eer_summary['mean'], eer_summary['std']))
print('AUC: %.3f +/- %.3f' % (auc_summary['mean'], auc_summary['std']))
return
if __name__ == '__main__':
print('This example takes about 15 minutes to run on an Intel i5...')
# Download and preprocess the CMU dataset
df = pd.read_csv(DATASET_URL)
df = preprocess(df)
# Verification results obtained using the 4th session as training data,
# sessions 5-8 as genuine and reps 1-5 as impostor
verification(df)
# Identification results obtained by 10-fold stratified cross validation using only the last session
identification(df.groupby(level=0).apply(lambda x: x[-(11 * 50):]).reset_index(level=0, drop=True))
|
vmonaco/pohmm | examples/keystroke.py | preprocess | python | def preprocess(df):
def process_row(idx_row):
idx, row = idx_row
# press-press latency
tau = 1000 * row[4::3].astype(float).values
tau = np.r_[np.median(tau), tau]
# duration
duration = 1000 * row[3::3].astype(float).values
keyname = list('.tie5Roanl') + ['enter']
return pd.DataFrame.from_items([
('user', [row['subject']] * 11),
('session', [row['sessionIndex'] * 100 + row['rep']] * 11),
('tau', tau),
('duration', duration),
('event', keyname)
])
df = pd.concat(map(process_row, df.iterrows())).set_index(['user', 'session'])
return df | Convert the CMU dataset from row vectors into time/duration row observations | train | https://github.com/vmonaco/pohmm/blob/c00f8a62d3005a171d424549a55d46c421859ae9/examples/keystroke.py#L34-L58 | null | import numpy as np
import pandas as pd
from scipy import interp
from sklearn.metrics import auc
from sklearn.metrics.ranking import _binary_clf_curve
from pohmm import Pohmm, PohmmClassifier
# CMU Keystroke Dynamics Benchmark Dataset
# See: http://www.cs.cmu.edu/~keystroke/
# Kevin S. Killourhy and Roy A. Maxion. "Comparing Anomaly Detectors for Keystroke Dynamics"
DATASET_URL = 'http://www.cs.cmu.edu/~keystroke/DSL-StrongPasswordData.csv'
def stratified_kfold(df, n_folds):
"""
Create stratified k-folds from an indexed dataframe
"""
sessions = pd.DataFrame.from_records(list(df.index.unique())).groupby(0).apply(lambda x: x[1].unique())
sessions.apply(lambda x: np.random.shuffle(x))
folds = []
for i in range(n_folds):
idx = sessions.apply(lambda x: pd.Series(x[i * (len(x) / n_folds):(i + 1) * (len(x) / n_folds)]))
idx = pd.DataFrame(idx.stack().reset_index(level=1, drop=True)).set_index(0, append=True).index.values
folds.append(df.loc[idx])
return folds
def user_folds(df, target):
users = df.index.get_level_values(0).unique()
return [df.loc[u].reset_index().set_index([target, 'session']) for u in users]
def roc_curve(y_true, y_score):
fps, tps, thresholds = _binary_clf_curve(
y_true, y_score, pos_label=None, sample_weight=None)
if tps.size == 0 or fps[0] != 0:
# Add an extra threshold position if necessary
tps = np.r_[0, tps]
fps = np.r_[0, fps]
thresholds = np.r_[thresholds[0] + 1e-2, thresholds]
fpr = fps / fps[-1]
tpr = tps / tps[-1]
return fpr, 1 - tpr, thresholds
def ROC(scores):
# Generate an ROC curve for each fold, ordered by increasing threshold
roc = scores.groupby('user').apply(lambda x: pd.DataFrame(np.c_[roc_curve(x['genuine'], x['score'])][::-1],
columns=['far', 'frr', 'threshold']))
# interpolate to get the same threshold values in each fold
thresholds = np.sort(roc['threshold'].unique())
roc = roc.groupby(level='user').apply(lambda x: pd.DataFrame(np.c_[thresholds,
interp(thresholds, x['threshold'], x['far']),
interp(thresholds, x['threshold'], x['frr'])],
columns=['threshold', 'far', 'frr']))
roc = roc.reset_index(level=1, drop=True).reset_index()
return roc
def EER(roc):
far, frr = roc['far'].values, roc['frr'].values
def perp(a):
b = np.empty_like(a)
b[0] = -a[1]
b[1] = a[0]
return b
# line segment a given by endpoints a1, a2
# line segment b given by endpoints b1, b2
def seg_intersect(a1, a2, b1, b2):
da = a2 - a1
db = b2 - b1
dp = a1 - b1
dap = perp(da)
denom = np.dot(dap, db)
num = np.dot(dap, dp)
return (num / denom) * db + b1
d = far <= frr
idx = np.diff(d).nonzero()[0][0]
return seg_intersect(np.array([idx, far[idx]]),
np.array([idx + 1, far[idx + 1]]),
np.array([idx, frr[idx]]),
np.array([idx + 1, frr[idx + 1]]))[1]
def AUC(roc):
return auc(roc['frr'].values, roc['far'].values)
def keystroke_model():
"""Generates a 2-state model with lognormal emissions and frequency smoothing"""
model = Pohmm(n_hidden_states=2,
init_spread=2,
emissions=['lognormal', 'lognormal'],
smoothing='freq',
init_method='obs',
thresh=1)
return model
def identification(df, n_folds=10, seed=1234):
# Obtain identification results using k-fold cross validation
np.random.seed(seed)
folds = stratified_kfold(df, n_folds)
identification_results = []
for i in range(n_folds):
print('Fold %d of %d' % (i + 1, n_folds))
test_idx, test_samples = zip(*folds[i].groupby(level=[0, 1]))
train_idx, train_samples = zip(*pd.concat(folds[:i] + folds[i + 1:]).groupby(level=[0, 1]))
test_labels, _ = zip(*test_idx)
train_labels, _ = zip(*train_idx)
cl = PohmmClassifier(keystroke_model)
cl.fit_df(train_labels, train_samples)
for test_label, test_sample in zip(test_labels, test_samples):
result, _ = cl.predict_df(test_sample)
identification_results.append((i, test_label, result))
identification_results = pd.DataFrame.from_records(identification_results,
columns=['fold', 'label', 'prediction'])
acc_summary = identification_results.groupby('fold').apply(
lambda x: (x['label'] == x['prediction']).sum() / len(x)).describe()
print('Identification summary')
print('ACC: %.3f +/- %.3f' % (acc_summary['mean'], acc_summary['std']))
return
def verification(df):
verification_results = []
users = set(df.index.get_level_values(level='user').unique())
for genuine_user in users:
impostor_users = users.difference([genuine_user])
genuine_samples = df.loc[genuine_user]
_, genuine_samples = zip(*genuine_samples.groupby(level='session'))
train, test = genuine_samples[150:200], genuine_samples[200:]
pohmm = keystroke_model()
pohmm.fit_df(train)
# train_scores = np.array([pohmm.score_df(sample) for sample in train])
scores = []
for sample in test:
score = pohmm.score_df(sample)
scores.append(score)
verification_results.append((genuine_user, True, score))
for imposter_user in impostor_users:
_, impostor_samples = zip(*df.loc[imposter_user].groupby(level='session'))
for sample in impostor_samples[:5]:
score = pohmm.score_df(sample)
scores.append(score)
verification_results.append((genuine_user, False, score))
verification_results = pd.DataFrame.from_records(verification_results,
columns=['user', 'genuine', 'score'])
verification_ROC = verification_results.groupby('user').apply(ROC)
eer_summary = verification_ROC.groupby('user').apply(EER).describe()
auc_summary = verification_ROC.groupby('user').apply(AUC).describe()
print('Verification summary')
print('EER: %.3f +/- %.3f' % (eer_summary['mean'], eer_summary['std']))
print('AUC: %.3f +/- %.3f' % (auc_summary['mean'], auc_summary['std']))
return
if __name__ == '__main__':
print('This example takes about 15 minutes to run on an Intel i5...')
# Download and preprocess the CMU dataset
df = pd.read_csv(DATASET_URL)
df = preprocess(df)
# Verification results obtained using the 4th session as training data,
# sessions 5-8 as genuine and reps 1-5 as impostor
verification(df)
# Identification results obtained by 10-fold stratified cross validation using only the last session
identification(df.groupby(level=0).apply(lambda x: x[-(11 * 50):]).reset_index(level=0, drop=True))
|
vmonaco/pohmm | examples/keystroke.py | keystroke_model | python | def keystroke_model():
model = Pohmm(n_hidden_states=2,
init_spread=2,
emissions=['lognormal', 'lognormal'],
smoothing='freq',
init_method='obs',
thresh=1)
return model | Generates a 2-state model with lognormal emissions and frequency smoothing | train | https://github.com/vmonaco/pohmm/blob/c00f8a62d3005a171d424549a55d46c421859ae9/examples/keystroke.py#L123-L131 | null | import numpy as np
import pandas as pd
from scipy import interp
from sklearn.metrics import auc
from sklearn.metrics.ranking import _binary_clf_curve
from pohmm import Pohmm, PohmmClassifier
# CMU Keystroke Dynamics Benchmark Dataset
# See: http://www.cs.cmu.edu/~keystroke/
# Kevin S. Killourhy and Roy A. Maxion. "Comparing Anomaly Detectors for Keystroke Dynamics"
DATASET_URL = 'http://www.cs.cmu.edu/~keystroke/DSL-StrongPasswordData.csv'
def stratified_kfold(df, n_folds):
"""
Create stratified k-folds from an indexed dataframe
"""
sessions = pd.DataFrame.from_records(list(df.index.unique())).groupby(0).apply(lambda x: x[1].unique())
sessions.apply(lambda x: np.random.shuffle(x))
folds = []
for i in range(n_folds):
idx = sessions.apply(lambda x: pd.Series(x[i * (len(x) / n_folds):(i + 1) * (len(x) / n_folds)]))
idx = pd.DataFrame(idx.stack().reset_index(level=1, drop=True)).set_index(0, append=True).index.values
folds.append(df.loc[idx])
return folds
def user_folds(df, target):
users = df.index.get_level_values(0).unique()
return [df.loc[u].reset_index().set_index([target, 'session']) for u in users]
def preprocess(df):
"""Convert the CMU dataset from row vectors into time/duration row observations"""
def process_row(idx_row):
idx, row = idx_row
# press-press latency
tau = 1000 * row[4::3].astype(float).values
tau = np.r_[np.median(tau), tau]
# duration
duration = 1000 * row[3::3].astype(float).values
keyname = list('.tie5Roanl') + ['enter']
return pd.DataFrame.from_items([
('user', [row['subject']] * 11),
('session', [row['sessionIndex'] * 100 + row['rep']] * 11),
('tau', tau),
('duration', duration),
('event', keyname)
])
df = pd.concat(map(process_row, df.iterrows())).set_index(['user', 'session'])
return df
def roc_curve(y_true, y_score):
fps, tps, thresholds = _binary_clf_curve(
y_true, y_score, pos_label=None, sample_weight=None)
if tps.size == 0 or fps[0] != 0:
# Add an extra threshold position if necessary
tps = np.r_[0, tps]
fps = np.r_[0, fps]
thresholds = np.r_[thresholds[0] + 1e-2, thresholds]
fpr = fps / fps[-1]
tpr = tps / tps[-1]
return fpr, 1 - tpr, thresholds
def ROC(scores):
# Generate an ROC curve for each fold, ordered by increasing threshold
roc = scores.groupby('user').apply(lambda x: pd.DataFrame(np.c_[roc_curve(x['genuine'], x['score'])][::-1],
columns=['far', 'frr', 'threshold']))
# interpolate to get the same threshold values in each fold
thresholds = np.sort(roc['threshold'].unique())
roc = roc.groupby(level='user').apply(lambda x: pd.DataFrame(np.c_[thresholds,
interp(thresholds, x['threshold'], x['far']),
interp(thresholds, x['threshold'], x['frr'])],
columns=['threshold', 'far', 'frr']))
roc = roc.reset_index(level=1, drop=True).reset_index()
return roc
def EER(roc):
far, frr = roc['far'].values, roc['frr'].values
def perp(a):
b = np.empty_like(a)
b[0] = -a[1]
b[1] = a[0]
return b
# line segment a given by endpoints a1, a2
# line segment b given by endpoints b1, b2
def seg_intersect(a1, a2, b1, b2):
da = a2 - a1
db = b2 - b1
dp = a1 - b1
dap = perp(da)
denom = np.dot(dap, db)
num = np.dot(dap, dp)
return (num / denom) * db + b1
d = far <= frr
idx = np.diff(d).nonzero()[0][0]
return seg_intersect(np.array([idx, far[idx]]),
np.array([idx + 1, far[idx + 1]]),
np.array([idx, frr[idx]]),
np.array([idx + 1, frr[idx + 1]]))[1]
def AUC(roc):
return auc(roc['frr'].values, roc['far'].values)
def identification(df, n_folds=10, seed=1234):
# Obtain identification results using k-fold cross validation
np.random.seed(seed)
folds = stratified_kfold(df, n_folds)
identification_results = []
for i in range(n_folds):
print('Fold %d of %d' % (i + 1, n_folds))
test_idx, test_samples = zip(*folds[i].groupby(level=[0, 1]))
train_idx, train_samples = zip(*pd.concat(folds[:i] + folds[i + 1:]).groupby(level=[0, 1]))
test_labels, _ = zip(*test_idx)
train_labels, _ = zip(*train_idx)
cl = PohmmClassifier(keystroke_model)
cl.fit_df(train_labels, train_samples)
for test_label, test_sample in zip(test_labels, test_samples):
result, _ = cl.predict_df(test_sample)
identification_results.append((i, test_label, result))
identification_results = pd.DataFrame.from_records(identification_results,
columns=['fold', 'label', 'prediction'])
acc_summary = identification_results.groupby('fold').apply(
lambda x: (x['label'] == x['prediction']).sum() / len(x)).describe()
print('Identification summary')
print('ACC: %.3f +/- %.3f' % (acc_summary['mean'], acc_summary['std']))
return
def verification(df):
verification_results = []
users = set(df.index.get_level_values(level='user').unique())
for genuine_user in users:
impostor_users = users.difference([genuine_user])
genuine_samples = df.loc[genuine_user]
_, genuine_samples = zip(*genuine_samples.groupby(level='session'))
train, test = genuine_samples[150:200], genuine_samples[200:]
pohmm = keystroke_model()
pohmm.fit_df(train)
# train_scores = np.array([pohmm.score_df(sample) for sample in train])
scores = []
for sample in test:
score = pohmm.score_df(sample)
scores.append(score)
verification_results.append((genuine_user, True, score))
for imposter_user in impostor_users:
_, impostor_samples = zip(*df.loc[imposter_user].groupby(level='session'))
for sample in impostor_samples[:5]:
score = pohmm.score_df(sample)
scores.append(score)
verification_results.append((genuine_user, False, score))
verification_results = pd.DataFrame.from_records(verification_results,
columns=['user', 'genuine', 'score'])
verification_ROC = verification_results.groupby('user').apply(ROC)
eer_summary = verification_ROC.groupby('user').apply(EER).describe()
auc_summary = verification_ROC.groupby('user').apply(AUC).describe()
print('Verification summary')
print('EER: %.3f +/- %.3f' % (eer_summary['mean'], eer_summary['std']))
print('AUC: %.3f +/- %.3f' % (auc_summary['mean'], auc_summary['std']))
return
if __name__ == '__main__':
print('This example takes about 15 minutes to run on an Intel i5...')
# Download and preprocess the CMU dataset
df = pd.read_csv(DATASET_URL)
df = preprocess(df)
# Verification results obtained using the 4th session as training data,
# sessions 5-8 as genuine and reps 1-5 as impostor
verification(df)
# Identification results obtained by 10-fold stratified cross validation using only the last session
identification(df.groupby(level=0).apply(lambda x: x[-(11 * 50):]).reset_index(level=0, drop=True))
|
vmonaco/pohmm | pohmm/utils.py | normalize | python | def normalize(A, axis=None, inplace=False):
if not inplace:
A = A.copy()
A += np.finfo(float).eps
Asum = A.sum(axis)
if axis and A.ndim > 1:
# Make sure we don't divide by zero.
Asum[Asum == 0] = 1
shape = list(A.shape)
shape[axis] = 1
Asum.shape = shape
A /= Asum
return A | Normalize the input array so that it sums to 1.
Parameters
----------
A: array, shape (n_samples, n_features)
Non-normalized input data.
axis: int
Dimension along which normalization is performed.
Returns
-------
normalized_A: array, shape (n_samples, n_features)
A with values normalized (summing to 1) along the prescribed axis | train | https://github.com/vmonaco/pohmm/blob/c00f8a62d3005a171d424549a55d46c421859ae9/pohmm/utils.py#L40-L68 | null | import numbers
import numpy as np
from itertools import product
def check_random_state(seed):
"""Turn seed into a np.random.RandomState instance
If seed is None, return the RandomState singleton used by np.random.
If seed is an int, return a new RandomState instance seeded with seed.
If seed is already a RandomState instance, return it.
Otherwise raise ValueError.
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, (numbers.Integral, np.integer)):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
' instance' % seed)
def logsumexp(arr, axis=0):
"""
Computes the sum of arr assuming arr is in the log domain.
Returns log(sum(exp(arr))) while minimizing the possibility of
over/underflow.
"""
arr = np.rollaxis(arr, axis)
# Use the max to normalize, as with the log this is what accumulates
# the less errors
vmax = arr.max(axis=0)
out = np.log(np.sum(np.exp(arr - vmax), axis=0))
out += vmax
return out
def ph2full(ptrans, htrans):
"""
Convert a p-state transition matrix and h-state matrices to the full transation matrix
The full transmat hase N=n_pstates*n_hstates states
"""
n_pstates = len(ptrans)
n_hstates = len(htrans[0, 0])
N = n_pstates * n_hstates
trans = np.zeros((N, N))
for pidx in range(n_pstates):
for hidx in range(n_hstates):
trans[pidx * n_hstates + hidx] = (ptrans[pidx, :, np.newaxis] * htrans[pidx, :, hidx]).flatten()
return trans
def full2ph(trans, n_pstates):
"""
Convert a full transmat to the respective p-state and h-state transmats
"""
n_hstates = len(trans) / n_pstates
htrans = np.zeros((n_pstates, n_pstates, n_hstates, n_hstates))
for pidx1, pidx2 in product(range(n_pstates), range(n_pstates)):
idx1 = pidx1 * n_hstates
idx2 = pidx2 * n_hstates
htrans[pidx1, pidx2] = trans[idx1:idx1 + n_hstates, idx2:idx2 + n_hstates]
ptrans = normalize(htrans.sum(axis=-1).sum(axis=-1), axis=1)
htrans = normalize(htrans, axis=3)
return ptrans, htrans
def gen_stochastic_matrix(size, random_state=None):
"""
Generate a unfiformly-random stochastic array or matrix
"""
if not type(size) is tuple:
size = (1, size)
assert len(size) == 2
n = random_state.uniform(size=(size[0], size[1] - 1))
n = np.concatenate([np.zeros((size[0], 1)), n, np.ones((size[0], 1))], axis=1)
A = np.diff(np.sort(n))
return A.squeeze()
def steadystate(A, max_iter=100):
"""
Empirically determine the steady state probabilities from a stochastic matrix
"""
P = np.linalg.matrix_power(A, max_iter)
# Determine the unique rows in A
v = []
for i in range(len(P)):
if not np.any([np.allclose(P[i], vi, ) for vi in v]):
v.append(P[i])
return normalize(np.sum(v, axis=0))
def unique_rows(a):
"""
Get the unique row values in matrix a
"""
a = np.ascontiguousarray(a)
unique_a = np.unique(a.view([('', a.dtype)] * a.shape[1]))
return unique_a.view(a.dtype).reshape((unique_a.shape[0], a.shape[1]))
def expected_lognormal(logsigma, logmu):
return np.exp(logmu + (logsigma ** 2) / 2)
def expected_normal(logmu, logsigma):
return logmu
|
vmonaco/pohmm | pohmm/utils.py | ph2full | python | def ph2full(ptrans, htrans):
n_pstates = len(ptrans)
n_hstates = len(htrans[0, 0])
N = n_pstates * n_hstates
trans = np.zeros((N, N))
for pidx in range(n_pstates):
for hidx in range(n_hstates):
trans[pidx * n_hstates + hidx] = (ptrans[pidx, :, np.newaxis] * htrans[pidx, :, hidx]).flatten()
return trans | Convert a p-state transition matrix and h-state matrices to the full transation matrix
The full transmat hase N=n_pstates*n_hstates states | train | https://github.com/vmonaco/pohmm/blob/c00f8a62d3005a171d424549a55d46c421859ae9/pohmm/utils.py#L71-L85 | null | import numbers
import numpy as np
from itertools import product
def check_random_state(seed):
"""Turn seed into a np.random.RandomState instance
If seed is None, return the RandomState singleton used by np.random.
If seed is an int, return a new RandomState instance seeded with seed.
If seed is already a RandomState instance, return it.
Otherwise raise ValueError.
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, (numbers.Integral, np.integer)):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
' instance' % seed)
def logsumexp(arr, axis=0):
"""
Computes the sum of arr assuming arr is in the log domain.
Returns log(sum(exp(arr))) while minimizing the possibility of
over/underflow.
"""
arr = np.rollaxis(arr, axis)
# Use the max to normalize, as with the log this is what accumulates
# the less errors
vmax = arr.max(axis=0)
out = np.log(np.sum(np.exp(arr - vmax), axis=0))
out += vmax
return out
def normalize(A, axis=None, inplace=False):
"""
Normalize the input array so that it sums to 1.
Parameters
----------
A: array, shape (n_samples, n_features)
Non-normalized input data.
axis: int
Dimension along which normalization is performed.
Returns
-------
normalized_A: array, shape (n_samples, n_features)
A with values normalized (summing to 1) along the prescribed axis
"""
if not inplace:
A = A.copy()
A += np.finfo(float).eps
Asum = A.sum(axis)
if axis and A.ndim > 1:
# Make sure we don't divide by zero.
Asum[Asum == 0] = 1
shape = list(A.shape)
shape[axis] = 1
Asum.shape = shape
A /= Asum
return A
def full2ph(trans, n_pstates):
"""
Convert a full transmat to the respective p-state and h-state transmats
"""
n_hstates = len(trans) / n_pstates
htrans = np.zeros((n_pstates, n_pstates, n_hstates, n_hstates))
for pidx1, pidx2 in product(range(n_pstates), range(n_pstates)):
idx1 = pidx1 * n_hstates
idx2 = pidx2 * n_hstates
htrans[pidx1, pidx2] = trans[idx1:idx1 + n_hstates, idx2:idx2 + n_hstates]
ptrans = normalize(htrans.sum(axis=-1).sum(axis=-1), axis=1)
htrans = normalize(htrans, axis=3)
return ptrans, htrans
def gen_stochastic_matrix(size, random_state=None):
"""
Generate a unfiformly-random stochastic array or matrix
"""
if not type(size) is tuple:
size = (1, size)
assert len(size) == 2
n = random_state.uniform(size=(size[0], size[1] - 1))
n = np.concatenate([np.zeros((size[0], 1)), n, np.ones((size[0], 1))], axis=1)
A = np.diff(np.sort(n))
return A.squeeze()
def steadystate(A, max_iter=100):
"""
Empirically determine the steady state probabilities from a stochastic matrix
"""
P = np.linalg.matrix_power(A, max_iter)
# Determine the unique rows in A
v = []
for i in range(len(P)):
if not np.any([np.allclose(P[i], vi, ) for vi in v]):
v.append(P[i])
return normalize(np.sum(v, axis=0))
def unique_rows(a):
"""
Get the unique row values in matrix a
"""
a = np.ascontiguousarray(a)
unique_a = np.unique(a.view([('', a.dtype)] * a.shape[1]))
return unique_a.view(a.dtype).reshape((unique_a.shape[0], a.shape[1]))
def expected_lognormal(logsigma, logmu):
return np.exp(logmu + (logsigma ** 2) / 2)
def expected_normal(logmu, logsigma):
return logmu
|
vmonaco/pohmm | pohmm/utils.py | full2ph | python | def full2ph(trans, n_pstates):
n_hstates = len(trans) / n_pstates
htrans = np.zeros((n_pstates, n_pstates, n_hstates, n_hstates))
for pidx1, pidx2 in product(range(n_pstates), range(n_pstates)):
idx1 = pidx1 * n_hstates
idx2 = pidx2 * n_hstates
htrans[pidx1, pidx2] = trans[idx1:idx1 + n_hstates, idx2:idx2 + n_hstates]
ptrans = normalize(htrans.sum(axis=-1).sum(axis=-1), axis=1)
htrans = normalize(htrans, axis=3)
return ptrans, htrans | Convert a full transmat to the respective p-state and h-state transmats | train | https://github.com/vmonaco/pohmm/blob/c00f8a62d3005a171d424549a55d46c421859ae9/pohmm/utils.py#L88-L103 | [
"def normalize(A, axis=None, inplace=False):\n \"\"\"\n Normalize the input array so that it sums to 1.\n\n Parameters\n ----------\n A: array, shape (n_samples, n_features)\n Non-normalized input data.\n axis: int\n Dimension along which normalization is performed.\n\n Returns\n -------\n normalized_A: array, shape (n_samples, n_features)\n A with values normalized (summing to 1) along the prescribed axis\n \"\"\"\n if not inplace:\n A = A.copy()\n\n A += np.finfo(float).eps\n Asum = A.sum(axis)\n if axis and A.ndim > 1:\n # Make sure we don't divide by zero.\n Asum[Asum == 0] = 1\n shape = list(A.shape)\n shape[axis] = 1\n Asum.shape = shape\n A /= Asum\n return A\n"
] | import numbers
import numpy as np
from itertools import product
def check_random_state(seed):
"""Turn seed into a np.random.RandomState instance
If seed is None, return the RandomState singleton used by np.random.
If seed is an int, return a new RandomState instance seeded with seed.
If seed is already a RandomState instance, return it.
Otherwise raise ValueError.
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, (numbers.Integral, np.integer)):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
' instance' % seed)
def logsumexp(arr, axis=0):
"""
Computes the sum of arr assuming arr is in the log domain.
Returns log(sum(exp(arr))) while minimizing the possibility of
over/underflow.
"""
arr = np.rollaxis(arr, axis)
# Use the max to normalize, as with the log this is what accumulates
# the less errors
vmax = arr.max(axis=0)
out = np.log(np.sum(np.exp(arr - vmax), axis=0))
out += vmax
return out
def normalize(A, axis=None, inplace=False):
"""
Normalize the input array so that it sums to 1.
Parameters
----------
A: array, shape (n_samples, n_features)
Non-normalized input data.
axis: int
Dimension along which normalization is performed.
Returns
-------
normalized_A: array, shape (n_samples, n_features)
A with values normalized (summing to 1) along the prescribed axis
"""
if not inplace:
A = A.copy()
A += np.finfo(float).eps
Asum = A.sum(axis)
if axis and A.ndim > 1:
# Make sure we don't divide by zero.
Asum[Asum == 0] = 1
shape = list(A.shape)
shape[axis] = 1
Asum.shape = shape
A /= Asum
return A
def ph2full(ptrans, htrans):
"""
Convert a p-state transition matrix and h-state matrices to the full transation matrix
The full transmat hase N=n_pstates*n_hstates states
"""
n_pstates = len(ptrans)
n_hstates = len(htrans[0, 0])
N = n_pstates * n_hstates
trans = np.zeros((N, N))
for pidx in range(n_pstates):
for hidx in range(n_hstates):
trans[pidx * n_hstates + hidx] = (ptrans[pidx, :, np.newaxis] * htrans[pidx, :, hidx]).flatten()
return trans
def gen_stochastic_matrix(size, random_state=None):
"""
Generate a unfiformly-random stochastic array or matrix
"""
if not type(size) is tuple:
size = (1, size)
assert len(size) == 2
n = random_state.uniform(size=(size[0], size[1] - 1))
n = np.concatenate([np.zeros((size[0], 1)), n, np.ones((size[0], 1))], axis=1)
A = np.diff(np.sort(n))
return A.squeeze()
def steadystate(A, max_iter=100):
"""
Empirically determine the steady state probabilities from a stochastic matrix
"""
P = np.linalg.matrix_power(A, max_iter)
# Determine the unique rows in A
v = []
for i in range(len(P)):
if not np.any([np.allclose(P[i], vi, ) for vi in v]):
v.append(P[i])
return normalize(np.sum(v, axis=0))
def unique_rows(a):
"""
Get the unique row values in matrix a
"""
a = np.ascontiguousarray(a)
unique_a = np.unique(a.view([('', a.dtype)] * a.shape[1]))
return unique_a.view(a.dtype).reshape((unique_a.shape[0], a.shape[1]))
def expected_lognormal(logsigma, logmu):
return np.exp(logmu + (logsigma ** 2) / 2)
def expected_normal(logmu, logsigma):
return logmu
|
vmonaco/pohmm | pohmm/utils.py | gen_stochastic_matrix | python | def gen_stochastic_matrix(size, random_state=None):
if not type(size) is tuple:
size = (1, size)
assert len(size) == 2
n = random_state.uniform(size=(size[0], size[1] - 1))
n = np.concatenate([np.zeros((size[0], 1)), n, np.ones((size[0], 1))], axis=1)
A = np.diff(np.sort(n))
return A.squeeze() | Generate a unfiformly-random stochastic array or matrix | train | https://github.com/vmonaco/pohmm/blob/c00f8a62d3005a171d424549a55d46c421859ae9/pohmm/utils.py#L106-L119 | null | import numbers
import numpy as np
from itertools import product
def check_random_state(seed):
"""Turn seed into a np.random.RandomState instance
If seed is None, return the RandomState singleton used by np.random.
If seed is an int, return a new RandomState instance seeded with seed.
If seed is already a RandomState instance, return it.
Otherwise raise ValueError.
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, (numbers.Integral, np.integer)):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
' instance' % seed)
def logsumexp(arr, axis=0):
"""
Computes the sum of arr assuming arr is in the log domain.
Returns log(sum(exp(arr))) while minimizing the possibility of
over/underflow.
"""
arr = np.rollaxis(arr, axis)
# Use the max to normalize, as with the log this is what accumulates
# the less errors
vmax = arr.max(axis=0)
out = np.log(np.sum(np.exp(arr - vmax), axis=0))
out += vmax
return out
def normalize(A, axis=None, inplace=False):
"""
Normalize the input array so that it sums to 1.
Parameters
----------
A: array, shape (n_samples, n_features)
Non-normalized input data.
axis: int
Dimension along which normalization is performed.
Returns
-------
normalized_A: array, shape (n_samples, n_features)
A with values normalized (summing to 1) along the prescribed axis
"""
if not inplace:
A = A.copy()
A += np.finfo(float).eps
Asum = A.sum(axis)
if axis and A.ndim > 1:
# Make sure we don't divide by zero.
Asum[Asum == 0] = 1
shape = list(A.shape)
shape[axis] = 1
Asum.shape = shape
A /= Asum
return A
def ph2full(ptrans, htrans):
"""
Convert a p-state transition matrix and h-state matrices to the full transation matrix
The full transmat hase N=n_pstates*n_hstates states
"""
n_pstates = len(ptrans)
n_hstates = len(htrans[0, 0])
N = n_pstates * n_hstates
trans = np.zeros((N, N))
for pidx in range(n_pstates):
for hidx in range(n_hstates):
trans[pidx * n_hstates + hidx] = (ptrans[pidx, :, np.newaxis] * htrans[pidx, :, hidx]).flatten()
return trans
def full2ph(trans, n_pstates):
"""
Convert a full transmat to the respective p-state and h-state transmats
"""
n_hstates = len(trans) / n_pstates
htrans = np.zeros((n_pstates, n_pstates, n_hstates, n_hstates))
for pidx1, pidx2 in product(range(n_pstates), range(n_pstates)):
idx1 = pidx1 * n_hstates
idx2 = pidx2 * n_hstates
htrans[pidx1, pidx2] = trans[idx1:idx1 + n_hstates, idx2:idx2 + n_hstates]
ptrans = normalize(htrans.sum(axis=-1).sum(axis=-1), axis=1)
htrans = normalize(htrans, axis=3)
return ptrans, htrans
def steadystate(A, max_iter=100):
"""
Empirically determine the steady state probabilities from a stochastic matrix
"""
P = np.linalg.matrix_power(A, max_iter)
# Determine the unique rows in A
v = []
for i in range(len(P)):
if not np.any([np.allclose(P[i], vi, ) for vi in v]):
v.append(P[i])
return normalize(np.sum(v, axis=0))
def unique_rows(a):
"""
Get the unique row values in matrix a
"""
a = np.ascontiguousarray(a)
unique_a = np.unique(a.view([('', a.dtype)] * a.shape[1]))
return unique_a.view(a.dtype).reshape((unique_a.shape[0], a.shape[1]))
def expected_lognormal(logsigma, logmu):
return np.exp(logmu + (logsigma ** 2) / 2)
def expected_normal(logmu, logsigma):
return logmu
|
vmonaco/pohmm | pohmm/utils.py | steadystate | python | def steadystate(A, max_iter=100):
P = np.linalg.matrix_power(A, max_iter)
# Determine the unique rows in A
v = []
for i in range(len(P)):
if not np.any([np.allclose(P[i], vi, ) for vi in v]):
v.append(P[i])
return normalize(np.sum(v, axis=0)) | Empirically determine the steady state probabilities from a stochastic matrix | train | https://github.com/vmonaco/pohmm/blob/c00f8a62d3005a171d424549a55d46c421859ae9/pohmm/utils.py#L122-L134 | [
"def normalize(A, axis=None, inplace=False):\n \"\"\"\n Normalize the input array so that it sums to 1.\n\n Parameters\n ----------\n A: array, shape (n_samples, n_features)\n Non-normalized input data.\n axis: int\n Dimension along which normalization is performed.\n\n Returns\n -------\n normalized_A: array, shape (n_samples, n_features)\n A with values normalized (summing to 1) along the prescribed axis\n \"\"\"\n if not inplace:\n A = A.copy()\n\n A += np.finfo(float).eps\n Asum = A.sum(axis)\n if axis and A.ndim > 1:\n # Make sure we don't divide by zero.\n Asum[Asum == 0] = 1\n shape = list(A.shape)\n shape[axis] = 1\n Asum.shape = shape\n A /= Asum\n return A\n"
] | import numbers
import numpy as np
from itertools import product
def check_random_state(seed):
"""Turn seed into a np.random.RandomState instance
If seed is None, return the RandomState singleton used by np.random.
If seed is an int, return a new RandomState instance seeded with seed.
If seed is already a RandomState instance, return it.
Otherwise raise ValueError.
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, (numbers.Integral, np.integer)):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
' instance' % seed)
def logsumexp(arr, axis=0):
"""
Computes the sum of arr assuming arr is in the log domain.
Returns log(sum(exp(arr))) while minimizing the possibility of
over/underflow.
"""
arr = np.rollaxis(arr, axis)
# Use the max to normalize, as with the log this is what accumulates
# the less errors
vmax = arr.max(axis=0)
out = np.log(np.sum(np.exp(arr - vmax), axis=0))
out += vmax
return out
def normalize(A, axis=None, inplace=False):
"""
Normalize the input array so that it sums to 1.
Parameters
----------
A: array, shape (n_samples, n_features)
Non-normalized input data.
axis: int
Dimension along which normalization is performed.
Returns
-------
normalized_A: array, shape (n_samples, n_features)
A with values normalized (summing to 1) along the prescribed axis
"""
if not inplace:
A = A.copy()
A += np.finfo(float).eps
Asum = A.sum(axis)
if axis and A.ndim > 1:
# Make sure we don't divide by zero.
Asum[Asum == 0] = 1
shape = list(A.shape)
shape[axis] = 1
Asum.shape = shape
A /= Asum
return A
def ph2full(ptrans, htrans):
"""
Convert a p-state transition matrix and h-state matrices to the full transation matrix
The full transmat hase N=n_pstates*n_hstates states
"""
n_pstates = len(ptrans)
n_hstates = len(htrans[0, 0])
N = n_pstates * n_hstates
trans = np.zeros((N, N))
for pidx in range(n_pstates):
for hidx in range(n_hstates):
trans[pidx * n_hstates + hidx] = (ptrans[pidx, :, np.newaxis] * htrans[pidx, :, hidx]).flatten()
return trans
def full2ph(trans, n_pstates):
"""
Convert a full transmat to the respective p-state and h-state transmats
"""
n_hstates = len(trans) / n_pstates
htrans = np.zeros((n_pstates, n_pstates, n_hstates, n_hstates))
for pidx1, pidx2 in product(range(n_pstates), range(n_pstates)):
idx1 = pidx1 * n_hstates
idx2 = pidx2 * n_hstates
htrans[pidx1, pidx2] = trans[idx1:idx1 + n_hstates, idx2:idx2 + n_hstates]
ptrans = normalize(htrans.sum(axis=-1).sum(axis=-1), axis=1)
htrans = normalize(htrans, axis=3)
return ptrans, htrans
def gen_stochastic_matrix(size, random_state=None):
"""
Generate a unfiformly-random stochastic array or matrix
"""
if not type(size) is tuple:
size = (1, size)
assert len(size) == 2
n = random_state.uniform(size=(size[0], size[1] - 1))
n = np.concatenate([np.zeros((size[0], 1)), n, np.ones((size[0], 1))], axis=1)
A = np.diff(np.sort(n))
return A.squeeze()
def unique_rows(a):
"""
Get the unique row values in matrix a
"""
a = np.ascontiguousarray(a)
unique_a = np.unique(a.view([('', a.dtype)] * a.shape[1]))
return unique_a.view(a.dtype).reshape((unique_a.shape[0], a.shape[1]))
def expected_lognormal(logsigma, logmu):
return np.exp(logmu + (logsigma ** 2) / 2)
def expected_normal(logmu, logsigma):
return logmu
|
thilux/tvdb_client | tvdb_client/clients/ApiV2Client.py | ApiV2Client.__get_header_with_auth | python | def __get_header_with_auth(self):
auth_header = self.__get_header()
auth_header['Authorization'] = 'Bearer %s' % self.__token
token_renew_time = self.__auth_time + timedelta(seconds=self.TOKEN_DURATION_SECONDS)
if datetime.now() > token_renew_time:
token_max_time = self.__auth_time + timedelta(seconds=self.TOKEN_MAX_DURATION)
if datetime.now() < token_max_time:
self.__refresh_token()
else:
self.login()
auth_header['Authorization'] = 'Bearer %s' % self.__token
return auth_header | This private method returns the HTTP heder filled with the Authorization information with the user token.
The token validity is monitored whenever this function is called, so according to the swagger page of TheTVDB
(https://api.thetvdb.com/swagger) the tokens are valid for 24 hours, therefore if a token is generated for over
23 hours already, this function will also perform a token refresh using TheTVDB refresh_token API. If over 24
hours have passed since the token generation, a login is performed to generate a new one, instead.
:return: A python dictionary representing the HTTP header to be used in TheTVDB API calls. | train | https://github.com/thilux/tvdb_client/blob/2d5106f260367c0abe1284683697874df6343f78/tvdb_client/clients/ApiV2Client.py#L54-L77 | [
"def __get_header(self):\n header = dict()\n header['Content-Type'] = 'application/json'\n header['User-Agent'] = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:47.0) Gecko/20100101 Firefox/47.0'\n\n if self.language:\n header['Accept-Language'] = self.language\n\n return header\n"
] | class ApiV2Client(BaseClient):
"""
This is the python library implementation of the TheTVDB API V2. Details of the APIs is documented in the swagger
page maintained by TheTVDB that can be found on this address: https://api.thetvdb.com/swagger
The only APIs not represented by methods in this library are the ones that only returns parameters information of
the actual API.
"""
API_BASE_URL = 'https://api.thetvdb.com'
TOKEN_DURATION_SECONDS = 23 * 3600 # 23 Hours
TOKEN_MAX_DURATION = 24 * 3600 # 24 Hours
def __init__(self, username, api_key, account_identifier, language=None):
self.username = username
self.api_key = api_key
self.account_identifier = account_identifier
self.is_authenticated = False
self.__token = None
self.__auth_time = 0
self.language = language
def __get_header(self):
header = dict()
header['Content-Type'] = 'application/json'
header['User-Agent'] = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:47.0) Gecko/20100101 Firefox/47.0'
if self.language:
header['Accept-Language'] = self.language
return header
def __refresh_token(self):
headers = self.__get_header()
headers['Authorization'] = 'Bearer %s' % self.__token
resp = requests_util.run_request('get', self.API_BASE_URL + '/refresh_token', headers=headers)
if resp.status_code == 200:
token_resp = self.parse_raw_response(resp)
self.__token = token_resp['token']
self.__auth_time = datetime.now()
def login(self):
"""
This method performs the login on TheTVDB given the api key, user name and account identifier.
:return: None
"""
auth_data = dict()
auth_data['apikey'] = self.api_key
auth_data['username'] = self.username
auth_data['userkey'] = self.account_identifier
auth_resp = requests_util.run_request('post', self.API_BASE_URL + '/login', data=json.dumps(auth_data),
headers=self.__get_header())
if auth_resp.status_code == 200:
auth_resp_data = self.parse_raw_response(auth_resp)
self.__token = auth_resp_data['token']
self.__auth_time = datetime.now()
self.is_authenticated = True
else:
raise AuthenticationFailedException('Authentication failed!')
@authentication_required
def search_series(self, name=None, imdb_id=None, zap2it_id=None):
"""
Searchs for a series in TheTVDB by either its name, imdb_id or zap2it_id.
:param name: the name of the series to look for
:param imdb_id: the IMDB id of the series to look for
:param zap2it_id: the zap2it id of the series to look for.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
arguments = locals()
optional_parameters = {'name': 'name', 'imdb_id': 'imdbId', 'zap2it_id': 'zap2itId'}
query_string = utils.query_param_string_from_option_args(optional_parameters, arguments)
raw_response = requests_util.run_request('get', '%s%s?%s' % (self.API_BASE_URL, '/search/series',
query_string),
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series(self, series_id):
"""
Retrieves the information of a series from TheTVDB given the series ID.
:param series_id: the id of the series on TheTVDB.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d' % series_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series_actors(self, series_id):
"""
Retrieves the information on the actors of a particular series, given its TheTVDB id.
:param series_id: the TheTVDB id of the series
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/actors' % series_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series_episodes(self, series_id, page=1):
"""
Retrieves all episodes for a particular series given its TheTVDB id. It retrieves a maximum of 100 results per
page.
:param series_id: The TheTVDB id of the series.
:param page: The page number. If none is provided, 1 is used by default.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/episodes?page=%d' %
(series_id, page), headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series_episodes(self, series_id, episode_number=None, aired_season=None, aired_episode=None,
dvd_season=None, dvd_episode=None, imdb_id=None, page=1):
"""
Retrieves all episodes for a particular series given its TheTVDB and filtered by additional optional details.
:param series_id: The TheTVDB id of the series.
:param episode_number: The optional absolute episode number.
:param aired_season: The optional aired season number.
:param aired_episode: The optional aired episode number.
:param dvd_season: The optional DVD season number.
:param dvd_episode: The optional DVD episode number.
:param imdb_id: The optional IMDB Id of the series.
:param page: The page number. If none is provided, 1 is used by default.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
arguments = locals()
optional_parameters = {'episode_number': 'absoluteNumber', 'aired_season': 'airedSeason',
'aired_episode': 'airedEpisode', 'dvd_season': 'dvdSeason', 'dvd_episode': 'dvdEpisode',
'imdb_id': 'imdbId', 'page': 'page'}
query_string = utils.query_param_string_from_option_args(optional_parameters, arguments)
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/episodes/query?%s' %
(series_id, query_string), headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series_episodes_summary(self, series_id):
"""
Retrieves the summary of the episodes and seasons of a series given its TheTVDB id.
:param series_id: The TheTVDB id of the series.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/episodes/summary' % series_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def __get_series_images(self, series_id):
"""
Retrieves the url to images (posters, fanart) for the series, seasons and episodes of a series given its
TheTVDB id.
:param series_id: The TheTVDB id of the series.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/images' % series_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series_images(self, series_id, image_type=None, resolution=None, sub_key=None):
"""
Retrieves the url to images (posters, fanart) for the series, seasons and episodes of a series given its
TheTVDB id and filtered by additional parameters.
:param series_id: The TheTVDB id of the series.
:param image_type: The optional type of image: posters, fanart, thumbnail, etc.
:param resolution: The optional resolution: i.e. 1280x1024
:param sub_key: The optional subkey: graphical, text.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
arguments = locals()
optional_parameters = {'image_type': 'keyType', 'resolution': 'resolution', 'sub_key': 'subKey'}
query_string = utils.query_param_string_from_option_args(optional_parameters, arguments)
if len(query_string):
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/images/query?%s' %
(series_id, query_string), headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
else:
return self.__get_series_images(series_id)
@authentication_required
def get_updated(self, from_time, to_time=None):
"""
Retrives a list of series that have changed on TheTVDB since a provided from time parameter and optionally to an
specified to time.
:param from_time: An epoch representation of the date from which to restrict the query to.
:param to_time: An optional epcoh representation of the date to which to restrict the query to.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
arguments = locals()
optional_parameters = {'to_time': 'toTime'}
query_string = 'fromTime=%s&%s' % (from_time,
utils.query_param_string_from_option_args(optional_parameters, arguments))
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/uodated/query?%s' % query_string,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_user(self):
"""
Retrieves information about the user currently using the api.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('get', self.API_BASE_URL + '/user',
headers=self.__get_header_with_auth()))
@authentication_required
def get_user_favorites(self):
"""
Retrieves the list of tv series the current user has flagged as favorite.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('get', self.API_BASE_URL + '/user/favorites',
headers=self.__get_header_with_auth()))
@authentication_required
def delete_user_favorite(self, series_id):
"""
Deletes the series of the provided id from the favorites list of the current user.
:param series_id: The TheTVDB id of the series.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('delete',
self.API_BASE_URL + '/user/favorites/%d' % series_id,
headers=self.__get_header_with_auth()))
@authentication_required
def add_user_favorite(self, series_id):
"""
Added the series related to the series id provided to the list of favorites of the current user.
:param series_id: The TheTVDB id of the series.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('put',
self.API_BASE_URL + '/user/favorites/%d' % series_id,
headers=self.__get_header_with_auth()))
@authentication_required
def __get_user_ratings(self):
"""
Returns a list of the ratings provided by the current user.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('get', self.API_BASE_URL + '/user/ratings',
headers=self.__get_header_with_auth()))
@authentication_required
def get_user_ratings(self, item_type=None):
"""
Returns a list of the ratings for the type of item provided, for the current user.
:param item_type: One of: series, episode or banner.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
if item_type:
query_string = 'itemType=%s' % item_type
return self.parse_raw_response(
requests_util.run_request('get', self.API_BASE_URL + '/user/ratings/qeury?%s' % query_string,
headers=self.__get_header_with_auth()))
else:
return self.__get_user_ratings()
@authentication_required
def add_user_rating(self, item_type, item_id, item_rating):
"""
Adds the rating for the item indicated for the current user.
:param item_type: One of: series, episode, banner.
:param item_id: The TheTVDB id of the item.
:param item_rating: The rating from 0 to 10.
:return:
"""
raw_response = requests_util.run_request('put',
self.API_BASE_URL + '/user/ratings/%s/%d/%d' %
(item_type, item_id, item_rating),
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def delete_user_rating(self, item_type, item_id):
"""
Deletes from the list of rating of the current user, the rating provided for the specified element type.
:param item_type: One of: series, episode, banner.
:param item_id: The TheTVDB Id of the item.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('delete',
self.API_BASE_URL + '/user/ratings/%s/%d' %
(item_type, item_id), headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_episode(self, episode_id):
"""
Returns the full information of the episode belonging to the Id provided.
:param episode_id: The TheTVDB id of the episode.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/episodes/%d' % episode_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_languages(self):
"""
Returns a list of all language options available in TheTVDB.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/languages',
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_language(self, language_id):
"""
Retrieves information about the language of the given id.
:param language_id: The TheTVDB Id of the language.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/languages/%d' % language_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
|
thilux/tvdb_client | tvdb_client/clients/ApiV2Client.py | ApiV2Client.login | python | def login(self):
auth_data = dict()
auth_data['apikey'] = self.api_key
auth_data['username'] = self.username
auth_data['userkey'] = self.account_identifier
auth_resp = requests_util.run_request('post', self.API_BASE_URL + '/login', data=json.dumps(auth_data),
headers=self.__get_header())
if auth_resp.status_code == 200:
auth_resp_data = self.parse_raw_response(auth_resp)
self.__token = auth_resp_data['token']
self.__auth_time = datetime.now()
self.is_authenticated = True
else:
raise AuthenticationFailedException('Authentication failed!') | This method performs the login on TheTVDB given the api key, user name and account identifier.
:return: None | train | https://github.com/thilux/tvdb_client/blob/2d5106f260367c0abe1284683697874df6343f78/tvdb_client/clients/ApiV2Client.py#L79-L99 | [
"def run_request(request_type, url, retries=5, data=None, headers=None):\n func = __request_factory(request_type)\n\n for attempt in range(retries+1):\n try:\n response = func(url, data=data, headers=headers)\n return response\n except RequestException:\n warnings.warn('Got error on request for attemp %d - %s' %\n (attempt, 'retry is possible' if attempt < retries else 'no retry'))\n return None\n",
"def __get_header(self):\n header = dict()\n header['Content-Type'] = 'application/json'\n header['User-Agent'] = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:47.0) Gecko/20100101 Firefox/47.0'\n\n if self.language:\n header['Accept-Language'] = self.language\n\n return header\n",
"def parse_raw_response(self, raw_response):\n\n if raw_response.status_code == 200:\n return json.loads(make_str_content(raw_response.content))\n else:\n return self.__handle_error(raw_response)\n"
] | class ApiV2Client(BaseClient):
"""
This is the python library implementation of the TheTVDB API V2. Details of the APIs is documented in the swagger
page maintained by TheTVDB that can be found on this address: https://api.thetvdb.com/swagger
The only APIs not represented by methods in this library are the ones that only returns parameters information of
the actual API.
"""
API_BASE_URL = 'https://api.thetvdb.com'
TOKEN_DURATION_SECONDS = 23 * 3600 # 23 Hours
TOKEN_MAX_DURATION = 24 * 3600 # 24 Hours
def __init__(self, username, api_key, account_identifier, language=None):
self.username = username
self.api_key = api_key
self.account_identifier = account_identifier
self.is_authenticated = False
self.__token = None
self.__auth_time = 0
self.language = language
def __get_header(self):
header = dict()
header['Content-Type'] = 'application/json'
header['User-Agent'] = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:47.0) Gecko/20100101 Firefox/47.0'
if self.language:
header['Accept-Language'] = self.language
return header
def __refresh_token(self):
headers = self.__get_header()
headers['Authorization'] = 'Bearer %s' % self.__token
resp = requests_util.run_request('get', self.API_BASE_URL + '/refresh_token', headers=headers)
if resp.status_code == 200:
token_resp = self.parse_raw_response(resp)
self.__token = token_resp['token']
self.__auth_time = datetime.now()
def __get_header_with_auth(self):
"""
This private method returns the HTTP heder filled with the Authorization information with the user token.
The token validity is monitored whenever this function is called, so according to the swagger page of TheTVDB
(https://api.thetvdb.com/swagger) the tokens are valid for 24 hours, therefore if a token is generated for over
23 hours already, this function will also perform a token refresh using TheTVDB refresh_token API. If over 24
hours have passed since the token generation, a login is performed to generate a new one, instead.
:return: A python dictionary representing the HTTP header to be used in TheTVDB API calls.
"""
auth_header = self.__get_header()
auth_header['Authorization'] = 'Bearer %s' % self.__token
token_renew_time = self.__auth_time + timedelta(seconds=self.TOKEN_DURATION_SECONDS)
if datetime.now() > token_renew_time:
token_max_time = self.__auth_time + timedelta(seconds=self.TOKEN_MAX_DURATION)
if datetime.now() < token_max_time:
self.__refresh_token()
else:
self.login()
auth_header['Authorization'] = 'Bearer %s' % self.__token
return auth_header
@authentication_required
def search_series(self, name=None, imdb_id=None, zap2it_id=None):
"""
Searchs for a series in TheTVDB by either its name, imdb_id or zap2it_id.
:param name: the name of the series to look for
:param imdb_id: the IMDB id of the series to look for
:param zap2it_id: the zap2it id of the series to look for.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
arguments = locals()
optional_parameters = {'name': 'name', 'imdb_id': 'imdbId', 'zap2it_id': 'zap2itId'}
query_string = utils.query_param_string_from_option_args(optional_parameters, arguments)
raw_response = requests_util.run_request('get', '%s%s?%s' % (self.API_BASE_URL, '/search/series',
query_string),
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series(self, series_id):
"""
Retrieves the information of a series from TheTVDB given the series ID.
:param series_id: the id of the series on TheTVDB.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d' % series_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series_actors(self, series_id):
"""
Retrieves the information on the actors of a particular series, given its TheTVDB id.
:param series_id: the TheTVDB id of the series
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/actors' % series_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series_episodes(self, series_id, page=1):
"""
Retrieves all episodes for a particular series given its TheTVDB id. It retrieves a maximum of 100 results per
page.
:param series_id: The TheTVDB id of the series.
:param page: The page number. If none is provided, 1 is used by default.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/episodes?page=%d' %
(series_id, page), headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series_episodes(self, series_id, episode_number=None, aired_season=None, aired_episode=None,
dvd_season=None, dvd_episode=None, imdb_id=None, page=1):
"""
Retrieves all episodes for a particular series given its TheTVDB and filtered by additional optional details.
:param series_id: The TheTVDB id of the series.
:param episode_number: The optional absolute episode number.
:param aired_season: The optional aired season number.
:param aired_episode: The optional aired episode number.
:param dvd_season: The optional DVD season number.
:param dvd_episode: The optional DVD episode number.
:param imdb_id: The optional IMDB Id of the series.
:param page: The page number. If none is provided, 1 is used by default.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
arguments = locals()
optional_parameters = {'episode_number': 'absoluteNumber', 'aired_season': 'airedSeason',
'aired_episode': 'airedEpisode', 'dvd_season': 'dvdSeason', 'dvd_episode': 'dvdEpisode',
'imdb_id': 'imdbId', 'page': 'page'}
query_string = utils.query_param_string_from_option_args(optional_parameters, arguments)
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/episodes/query?%s' %
(series_id, query_string), headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series_episodes_summary(self, series_id):
"""
Retrieves the summary of the episodes and seasons of a series given its TheTVDB id.
:param series_id: The TheTVDB id of the series.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/episodes/summary' % series_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def __get_series_images(self, series_id):
"""
Retrieves the url to images (posters, fanart) for the series, seasons and episodes of a series given its
TheTVDB id.
:param series_id: The TheTVDB id of the series.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/images' % series_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series_images(self, series_id, image_type=None, resolution=None, sub_key=None):
"""
Retrieves the url to images (posters, fanart) for the series, seasons and episodes of a series given its
TheTVDB id and filtered by additional parameters.
:param series_id: The TheTVDB id of the series.
:param image_type: The optional type of image: posters, fanart, thumbnail, etc.
:param resolution: The optional resolution: i.e. 1280x1024
:param sub_key: The optional subkey: graphical, text.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
arguments = locals()
optional_parameters = {'image_type': 'keyType', 'resolution': 'resolution', 'sub_key': 'subKey'}
query_string = utils.query_param_string_from_option_args(optional_parameters, arguments)
if len(query_string):
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/images/query?%s' %
(series_id, query_string), headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
else:
return self.__get_series_images(series_id)
@authentication_required
def get_updated(self, from_time, to_time=None):
"""
Retrives a list of series that have changed on TheTVDB since a provided from time parameter and optionally to an
specified to time.
:param from_time: An epoch representation of the date from which to restrict the query to.
:param to_time: An optional epcoh representation of the date to which to restrict the query to.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
arguments = locals()
optional_parameters = {'to_time': 'toTime'}
query_string = 'fromTime=%s&%s' % (from_time,
utils.query_param_string_from_option_args(optional_parameters, arguments))
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/uodated/query?%s' % query_string,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_user(self):
"""
Retrieves information about the user currently using the api.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('get', self.API_BASE_URL + '/user',
headers=self.__get_header_with_auth()))
@authentication_required
def get_user_favorites(self):
"""
Retrieves the list of tv series the current user has flagged as favorite.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('get', self.API_BASE_URL + '/user/favorites',
headers=self.__get_header_with_auth()))
@authentication_required
def delete_user_favorite(self, series_id):
"""
Deletes the series of the provided id from the favorites list of the current user.
:param series_id: The TheTVDB id of the series.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('delete',
self.API_BASE_URL + '/user/favorites/%d' % series_id,
headers=self.__get_header_with_auth()))
@authentication_required
def add_user_favorite(self, series_id):
"""
Added the series related to the series id provided to the list of favorites of the current user.
:param series_id: The TheTVDB id of the series.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('put',
self.API_BASE_URL + '/user/favorites/%d' % series_id,
headers=self.__get_header_with_auth()))
@authentication_required
def __get_user_ratings(self):
"""
Returns a list of the ratings provided by the current user.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('get', self.API_BASE_URL + '/user/ratings',
headers=self.__get_header_with_auth()))
@authentication_required
def get_user_ratings(self, item_type=None):
"""
Returns a list of the ratings for the type of item provided, for the current user.
:param item_type: One of: series, episode or banner.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
if item_type:
query_string = 'itemType=%s' % item_type
return self.parse_raw_response(
requests_util.run_request('get', self.API_BASE_URL + '/user/ratings/qeury?%s' % query_string,
headers=self.__get_header_with_auth()))
else:
return self.__get_user_ratings()
@authentication_required
def add_user_rating(self, item_type, item_id, item_rating):
"""
Adds the rating for the item indicated for the current user.
:param item_type: One of: series, episode, banner.
:param item_id: The TheTVDB id of the item.
:param item_rating: The rating from 0 to 10.
:return:
"""
raw_response = requests_util.run_request('put',
self.API_BASE_URL + '/user/ratings/%s/%d/%d' %
(item_type, item_id, item_rating),
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def delete_user_rating(self, item_type, item_id):
"""
Deletes from the list of rating of the current user, the rating provided for the specified element type.
:param item_type: One of: series, episode, banner.
:param item_id: The TheTVDB Id of the item.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('delete',
self.API_BASE_URL + '/user/ratings/%s/%d' %
(item_type, item_id), headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_episode(self, episode_id):
"""
Returns the full information of the episode belonging to the Id provided.
:param episode_id: The TheTVDB id of the episode.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/episodes/%d' % episode_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_languages(self):
"""
Returns a list of all language options available in TheTVDB.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/languages',
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_language(self, language_id):
"""
Retrieves information about the language of the given id.
:param language_id: The TheTVDB Id of the language.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/languages/%d' % language_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
|
thilux/tvdb_client | tvdb_client/clients/ApiV2Client.py | ApiV2Client.search_series | python | def search_series(self, name=None, imdb_id=None, zap2it_id=None):
arguments = locals()
optional_parameters = {'name': 'name', 'imdb_id': 'imdbId', 'zap2it_id': 'zap2itId'}
query_string = utils.query_param_string_from_option_args(optional_parameters, arguments)
raw_response = requests_util.run_request('get', '%s%s?%s' % (self.API_BASE_URL, '/search/series',
query_string),
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response) | Searchs for a series in TheTVDB by either its name, imdb_id or zap2it_id.
:param name: the name of the series to look for
:param imdb_id: the IMDB id of the series to look for
:param zap2it_id: the zap2it id of the series to look for.
:return: a python dictionary with either the result of the search or an error from TheTVDB. | train | https://github.com/thilux/tvdb_client/blob/2d5106f260367c0abe1284683697874df6343f78/tvdb_client/clients/ApiV2Client.py#L102-L120 | [
"def run_request(request_type, url, retries=5, data=None, headers=None):\n func = __request_factory(request_type)\n\n for attempt in range(retries+1):\n try:\n response = func(url, data=data, headers=headers)\n return response\n except RequestException:\n warnings.warn('Got error on request for attemp %d - %s' %\n (attempt, 'retry is possible' if attempt < retries else 'no retry'))\n return None\n",
"def query_param_string_from_option_args(a2q_dict, args_dict):\n \"\"\"\n From a dictionary of arguments to query string parameters, loops through ad arguments list and makes a query string.\n :param a2q_dict: a dictionary containing argument_name > query string parameter name.\n :param args_dict: a dictionary containing the argument_name > argument_value\n :return: a query string.\n \"\"\"\n\n name_value_pairs = dict()\n for ak in a2q_dict.keys():\n value = args_dict[ak]\n if value != None:\n name_value_pairs[a2q_dict[ak]] = str(value)\n\n return urllib.urlencode(name_value_pairs)\n",
"def __get_header_with_auth(self):\n \"\"\"\n This private method returns the HTTP heder filled with the Authorization information with the user token.\n The token validity is monitored whenever this function is called, so according to the swagger page of TheTVDB\n (https://api.thetvdb.com/swagger) the tokens are valid for 24 hours, therefore if a token is generated for over\n 23 hours already, this function will also perform a token refresh using TheTVDB refresh_token API. If over 24\n hours have passed since the token generation, a login is performed to generate a new one, instead.\n\n :return: A python dictionary representing the HTTP header to be used in TheTVDB API calls.\n \"\"\"\n auth_header = self.__get_header()\n auth_header['Authorization'] = 'Bearer %s' % self.__token\n token_renew_time = self.__auth_time + timedelta(seconds=self.TOKEN_DURATION_SECONDS)\n\n if datetime.now() > token_renew_time:\n token_max_time = self.__auth_time + timedelta(seconds=self.TOKEN_MAX_DURATION)\n if datetime.now() < token_max_time:\n self.__refresh_token()\n else:\n self.login()\n\n auth_header['Authorization'] = 'Bearer %s' % self.__token\n\n return auth_header\n",
"def parse_raw_response(self, raw_response):\n\n if raw_response.status_code == 200:\n return json.loads(make_str_content(raw_response.content))\n else:\n return self.__handle_error(raw_response)\n"
] | class ApiV2Client(BaseClient):
"""
This is the python library implementation of the TheTVDB API V2. Details of the APIs is documented in the swagger
page maintained by TheTVDB that can be found on this address: https://api.thetvdb.com/swagger
The only APIs not represented by methods in this library are the ones that only returns parameters information of
the actual API.
"""
API_BASE_URL = 'https://api.thetvdb.com'
TOKEN_DURATION_SECONDS = 23 * 3600 # 23 Hours
TOKEN_MAX_DURATION = 24 * 3600 # 24 Hours
def __init__(self, username, api_key, account_identifier, language=None):
self.username = username
self.api_key = api_key
self.account_identifier = account_identifier
self.is_authenticated = False
self.__token = None
self.__auth_time = 0
self.language = language
def __get_header(self):
header = dict()
header['Content-Type'] = 'application/json'
header['User-Agent'] = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:47.0) Gecko/20100101 Firefox/47.0'
if self.language:
header['Accept-Language'] = self.language
return header
def __refresh_token(self):
headers = self.__get_header()
headers['Authorization'] = 'Bearer %s' % self.__token
resp = requests_util.run_request('get', self.API_BASE_URL + '/refresh_token', headers=headers)
if resp.status_code == 200:
token_resp = self.parse_raw_response(resp)
self.__token = token_resp['token']
self.__auth_time = datetime.now()
def __get_header_with_auth(self):
"""
This private method returns the HTTP heder filled with the Authorization information with the user token.
The token validity is monitored whenever this function is called, so according to the swagger page of TheTVDB
(https://api.thetvdb.com/swagger) the tokens are valid for 24 hours, therefore if a token is generated for over
23 hours already, this function will also perform a token refresh using TheTVDB refresh_token API. If over 24
hours have passed since the token generation, a login is performed to generate a new one, instead.
:return: A python dictionary representing the HTTP header to be used in TheTVDB API calls.
"""
auth_header = self.__get_header()
auth_header['Authorization'] = 'Bearer %s' % self.__token
token_renew_time = self.__auth_time + timedelta(seconds=self.TOKEN_DURATION_SECONDS)
if datetime.now() > token_renew_time:
token_max_time = self.__auth_time + timedelta(seconds=self.TOKEN_MAX_DURATION)
if datetime.now() < token_max_time:
self.__refresh_token()
else:
self.login()
auth_header['Authorization'] = 'Bearer %s' % self.__token
return auth_header
def login(self):
"""
This method performs the login on TheTVDB given the api key, user name and account identifier.
:return: None
"""
auth_data = dict()
auth_data['apikey'] = self.api_key
auth_data['username'] = self.username
auth_data['userkey'] = self.account_identifier
auth_resp = requests_util.run_request('post', self.API_BASE_URL + '/login', data=json.dumps(auth_data),
headers=self.__get_header())
if auth_resp.status_code == 200:
auth_resp_data = self.parse_raw_response(auth_resp)
self.__token = auth_resp_data['token']
self.__auth_time = datetime.now()
self.is_authenticated = True
else:
raise AuthenticationFailedException('Authentication failed!')
@authentication_required
@authentication_required
def get_series(self, series_id):
"""
Retrieves the information of a series from TheTVDB given the series ID.
:param series_id: the id of the series on TheTVDB.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d' % series_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series_actors(self, series_id):
"""
Retrieves the information on the actors of a particular series, given its TheTVDB id.
:param series_id: the TheTVDB id of the series
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/actors' % series_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series_episodes(self, series_id, page=1):
"""
Retrieves all episodes for a particular series given its TheTVDB id. It retrieves a maximum of 100 results per
page.
:param series_id: The TheTVDB id of the series.
:param page: The page number. If none is provided, 1 is used by default.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/episodes?page=%d' %
(series_id, page), headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series_episodes(self, series_id, episode_number=None, aired_season=None, aired_episode=None,
dvd_season=None, dvd_episode=None, imdb_id=None, page=1):
"""
Retrieves all episodes for a particular series given its TheTVDB and filtered by additional optional details.
:param series_id: The TheTVDB id of the series.
:param episode_number: The optional absolute episode number.
:param aired_season: The optional aired season number.
:param aired_episode: The optional aired episode number.
:param dvd_season: The optional DVD season number.
:param dvd_episode: The optional DVD episode number.
:param imdb_id: The optional IMDB Id of the series.
:param page: The page number. If none is provided, 1 is used by default.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
arguments = locals()
optional_parameters = {'episode_number': 'absoluteNumber', 'aired_season': 'airedSeason',
'aired_episode': 'airedEpisode', 'dvd_season': 'dvdSeason', 'dvd_episode': 'dvdEpisode',
'imdb_id': 'imdbId', 'page': 'page'}
query_string = utils.query_param_string_from_option_args(optional_parameters, arguments)
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/episodes/query?%s' %
(series_id, query_string), headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series_episodes_summary(self, series_id):
"""
Retrieves the summary of the episodes and seasons of a series given its TheTVDB id.
:param series_id: The TheTVDB id of the series.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/episodes/summary' % series_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def __get_series_images(self, series_id):
"""
Retrieves the url to images (posters, fanart) for the series, seasons and episodes of a series given its
TheTVDB id.
:param series_id: The TheTVDB id of the series.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/images' % series_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series_images(self, series_id, image_type=None, resolution=None, sub_key=None):
"""
Retrieves the url to images (posters, fanart) for the series, seasons and episodes of a series given its
TheTVDB id and filtered by additional parameters.
:param series_id: The TheTVDB id of the series.
:param image_type: The optional type of image: posters, fanart, thumbnail, etc.
:param resolution: The optional resolution: i.e. 1280x1024
:param sub_key: The optional subkey: graphical, text.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
arguments = locals()
optional_parameters = {'image_type': 'keyType', 'resolution': 'resolution', 'sub_key': 'subKey'}
query_string = utils.query_param_string_from_option_args(optional_parameters, arguments)
if len(query_string):
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/images/query?%s' %
(series_id, query_string), headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
else:
return self.__get_series_images(series_id)
@authentication_required
def get_updated(self, from_time, to_time=None):
"""
Retrives a list of series that have changed on TheTVDB since a provided from time parameter and optionally to an
specified to time.
:param from_time: An epoch representation of the date from which to restrict the query to.
:param to_time: An optional epcoh representation of the date to which to restrict the query to.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
arguments = locals()
optional_parameters = {'to_time': 'toTime'}
query_string = 'fromTime=%s&%s' % (from_time,
utils.query_param_string_from_option_args(optional_parameters, arguments))
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/uodated/query?%s' % query_string,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_user(self):
"""
Retrieves information about the user currently using the api.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('get', self.API_BASE_URL + '/user',
headers=self.__get_header_with_auth()))
@authentication_required
def get_user_favorites(self):
"""
Retrieves the list of tv series the current user has flagged as favorite.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('get', self.API_BASE_URL + '/user/favorites',
headers=self.__get_header_with_auth()))
@authentication_required
def delete_user_favorite(self, series_id):
"""
Deletes the series of the provided id from the favorites list of the current user.
:param series_id: The TheTVDB id of the series.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('delete',
self.API_BASE_URL + '/user/favorites/%d' % series_id,
headers=self.__get_header_with_auth()))
@authentication_required
def add_user_favorite(self, series_id):
"""
Added the series related to the series id provided to the list of favorites of the current user.
:param series_id: The TheTVDB id of the series.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('put',
self.API_BASE_URL + '/user/favorites/%d' % series_id,
headers=self.__get_header_with_auth()))
@authentication_required
def __get_user_ratings(self):
"""
Returns a list of the ratings provided by the current user.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('get', self.API_BASE_URL + '/user/ratings',
headers=self.__get_header_with_auth()))
@authentication_required
def get_user_ratings(self, item_type=None):
"""
Returns a list of the ratings for the type of item provided, for the current user.
:param item_type: One of: series, episode or banner.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
if item_type:
query_string = 'itemType=%s' % item_type
return self.parse_raw_response(
requests_util.run_request('get', self.API_BASE_URL + '/user/ratings/qeury?%s' % query_string,
headers=self.__get_header_with_auth()))
else:
return self.__get_user_ratings()
@authentication_required
def add_user_rating(self, item_type, item_id, item_rating):
"""
Adds the rating for the item indicated for the current user.
:param item_type: One of: series, episode, banner.
:param item_id: The TheTVDB id of the item.
:param item_rating: The rating from 0 to 10.
:return:
"""
raw_response = requests_util.run_request('put',
self.API_BASE_URL + '/user/ratings/%s/%d/%d' %
(item_type, item_id, item_rating),
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def delete_user_rating(self, item_type, item_id):
"""
Deletes from the list of rating of the current user, the rating provided for the specified element type.
:param item_type: One of: series, episode, banner.
:param item_id: The TheTVDB Id of the item.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('delete',
self.API_BASE_URL + '/user/ratings/%s/%d' %
(item_type, item_id), headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_episode(self, episode_id):
"""
Returns the full information of the episode belonging to the Id provided.
:param episode_id: The TheTVDB id of the episode.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/episodes/%d' % episode_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_languages(self):
"""
Returns a list of all language options available in TheTVDB.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/languages',
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_language(self, language_id):
"""
Retrieves information about the language of the given id.
:param language_id: The TheTVDB Id of the language.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/languages/%d' % language_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
|
thilux/tvdb_client | tvdb_client/clients/ApiV2Client.py | ApiV2Client.get_series_actors | python | def get_series_actors(self, series_id):
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/actors' % series_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response) | Retrieves the information on the actors of a particular series, given its TheTVDB id.
:param series_id: the TheTVDB id of the series
:return: a python dictionary with either the result of the search or an error from TheTVDB. | train | https://github.com/thilux/tvdb_client/blob/2d5106f260367c0abe1284683697874df6343f78/tvdb_client/clients/ApiV2Client.py#L137-L148 | [
"def run_request(request_type, url, retries=5, data=None, headers=None):\n func = __request_factory(request_type)\n\n for attempt in range(retries+1):\n try:\n response = func(url, data=data, headers=headers)\n return response\n except RequestException:\n warnings.warn('Got error on request for attemp %d - %s' %\n (attempt, 'retry is possible' if attempt < retries else 'no retry'))\n return None\n",
"def __get_header_with_auth(self):\n \"\"\"\n This private method returns the HTTP heder filled with the Authorization information with the user token.\n The token validity is monitored whenever this function is called, so according to the swagger page of TheTVDB\n (https://api.thetvdb.com/swagger) the tokens are valid for 24 hours, therefore if a token is generated for over\n 23 hours already, this function will also perform a token refresh using TheTVDB refresh_token API. If over 24\n hours have passed since the token generation, a login is performed to generate a new one, instead.\n\n :return: A python dictionary representing the HTTP header to be used in TheTVDB API calls.\n \"\"\"\n auth_header = self.__get_header()\n auth_header['Authorization'] = 'Bearer %s' % self.__token\n token_renew_time = self.__auth_time + timedelta(seconds=self.TOKEN_DURATION_SECONDS)\n\n if datetime.now() > token_renew_time:\n token_max_time = self.__auth_time + timedelta(seconds=self.TOKEN_MAX_DURATION)\n if datetime.now() < token_max_time:\n self.__refresh_token()\n else:\n self.login()\n\n auth_header['Authorization'] = 'Bearer %s' % self.__token\n\n return auth_header\n",
"def parse_raw_response(self, raw_response):\n\n if raw_response.status_code == 200:\n return json.loads(make_str_content(raw_response.content))\n else:\n return self.__handle_error(raw_response)\n"
] | class ApiV2Client(BaseClient):
"""
This is the python library implementation of the TheTVDB API V2. Details of the APIs is documented in the swagger
page maintained by TheTVDB that can be found on this address: https://api.thetvdb.com/swagger
The only APIs not represented by methods in this library are the ones that only returns parameters information of
the actual API.
"""
API_BASE_URL = 'https://api.thetvdb.com'
TOKEN_DURATION_SECONDS = 23 * 3600 # 23 Hours
TOKEN_MAX_DURATION = 24 * 3600 # 24 Hours
def __init__(self, username, api_key, account_identifier, language=None):
self.username = username
self.api_key = api_key
self.account_identifier = account_identifier
self.is_authenticated = False
self.__token = None
self.__auth_time = 0
self.language = language
def __get_header(self):
header = dict()
header['Content-Type'] = 'application/json'
header['User-Agent'] = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:47.0) Gecko/20100101 Firefox/47.0'
if self.language:
header['Accept-Language'] = self.language
return header
def __refresh_token(self):
headers = self.__get_header()
headers['Authorization'] = 'Bearer %s' % self.__token
resp = requests_util.run_request('get', self.API_BASE_URL + '/refresh_token', headers=headers)
if resp.status_code == 200:
token_resp = self.parse_raw_response(resp)
self.__token = token_resp['token']
self.__auth_time = datetime.now()
def __get_header_with_auth(self):
"""
This private method returns the HTTP heder filled with the Authorization information with the user token.
The token validity is monitored whenever this function is called, so according to the swagger page of TheTVDB
(https://api.thetvdb.com/swagger) the tokens are valid for 24 hours, therefore if a token is generated for over
23 hours already, this function will also perform a token refresh using TheTVDB refresh_token API. If over 24
hours have passed since the token generation, a login is performed to generate a new one, instead.
:return: A python dictionary representing the HTTP header to be used in TheTVDB API calls.
"""
auth_header = self.__get_header()
auth_header['Authorization'] = 'Bearer %s' % self.__token
token_renew_time = self.__auth_time + timedelta(seconds=self.TOKEN_DURATION_SECONDS)
if datetime.now() > token_renew_time:
token_max_time = self.__auth_time + timedelta(seconds=self.TOKEN_MAX_DURATION)
if datetime.now() < token_max_time:
self.__refresh_token()
else:
self.login()
auth_header['Authorization'] = 'Bearer %s' % self.__token
return auth_header
def login(self):
"""
This method performs the login on TheTVDB given the api key, user name and account identifier.
:return: None
"""
auth_data = dict()
auth_data['apikey'] = self.api_key
auth_data['username'] = self.username
auth_data['userkey'] = self.account_identifier
auth_resp = requests_util.run_request('post', self.API_BASE_URL + '/login', data=json.dumps(auth_data),
headers=self.__get_header())
if auth_resp.status_code == 200:
auth_resp_data = self.parse_raw_response(auth_resp)
self.__token = auth_resp_data['token']
self.__auth_time = datetime.now()
self.is_authenticated = True
else:
raise AuthenticationFailedException('Authentication failed!')
@authentication_required
def search_series(self, name=None, imdb_id=None, zap2it_id=None):
"""
Searchs for a series in TheTVDB by either its name, imdb_id or zap2it_id.
:param name: the name of the series to look for
:param imdb_id: the IMDB id of the series to look for
:param zap2it_id: the zap2it id of the series to look for.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
arguments = locals()
optional_parameters = {'name': 'name', 'imdb_id': 'imdbId', 'zap2it_id': 'zap2itId'}
query_string = utils.query_param_string_from_option_args(optional_parameters, arguments)
raw_response = requests_util.run_request('get', '%s%s?%s' % (self.API_BASE_URL, '/search/series',
query_string),
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series(self, series_id):
"""
Retrieves the information of a series from TheTVDB given the series ID.
:param series_id: the id of the series on TheTVDB.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d' % series_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
@authentication_required
def get_series_episodes(self, series_id, page=1):
"""
Retrieves all episodes for a particular series given its TheTVDB id. It retrieves a maximum of 100 results per
page.
:param series_id: The TheTVDB id of the series.
:param page: The page number. If none is provided, 1 is used by default.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/episodes?page=%d' %
(series_id, page), headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series_episodes(self, series_id, episode_number=None, aired_season=None, aired_episode=None,
dvd_season=None, dvd_episode=None, imdb_id=None, page=1):
"""
Retrieves all episodes for a particular series given its TheTVDB and filtered by additional optional details.
:param series_id: The TheTVDB id of the series.
:param episode_number: The optional absolute episode number.
:param aired_season: The optional aired season number.
:param aired_episode: The optional aired episode number.
:param dvd_season: The optional DVD season number.
:param dvd_episode: The optional DVD episode number.
:param imdb_id: The optional IMDB Id of the series.
:param page: The page number. If none is provided, 1 is used by default.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
arguments = locals()
optional_parameters = {'episode_number': 'absoluteNumber', 'aired_season': 'airedSeason',
'aired_episode': 'airedEpisode', 'dvd_season': 'dvdSeason', 'dvd_episode': 'dvdEpisode',
'imdb_id': 'imdbId', 'page': 'page'}
query_string = utils.query_param_string_from_option_args(optional_parameters, arguments)
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/episodes/query?%s' %
(series_id, query_string), headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series_episodes_summary(self, series_id):
"""
Retrieves the summary of the episodes and seasons of a series given its TheTVDB id.
:param series_id: The TheTVDB id of the series.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/episodes/summary' % series_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def __get_series_images(self, series_id):
"""
Retrieves the url to images (posters, fanart) for the series, seasons and episodes of a series given its
TheTVDB id.
:param series_id: The TheTVDB id of the series.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/images' % series_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series_images(self, series_id, image_type=None, resolution=None, sub_key=None):
"""
Retrieves the url to images (posters, fanart) for the series, seasons and episodes of a series given its
TheTVDB id and filtered by additional parameters.
:param series_id: The TheTVDB id of the series.
:param image_type: The optional type of image: posters, fanart, thumbnail, etc.
:param resolution: The optional resolution: i.e. 1280x1024
:param sub_key: The optional subkey: graphical, text.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
arguments = locals()
optional_parameters = {'image_type': 'keyType', 'resolution': 'resolution', 'sub_key': 'subKey'}
query_string = utils.query_param_string_from_option_args(optional_parameters, arguments)
if len(query_string):
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/images/query?%s' %
(series_id, query_string), headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
else:
return self.__get_series_images(series_id)
@authentication_required
def get_updated(self, from_time, to_time=None):
"""
Retrives a list of series that have changed on TheTVDB since a provided from time parameter and optionally to an
specified to time.
:param from_time: An epoch representation of the date from which to restrict the query to.
:param to_time: An optional epcoh representation of the date to which to restrict the query to.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
arguments = locals()
optional_parameters = {'to_time': 'toTime'}
query_string = 'fromTime=%s&%s' % (from_time,
utils.query_param_string_from_option_args(optional_parameters, arguments))
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/uodated/query?%s' % query_string,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_user(self):
"""
Retrieves information about the user currently using the api.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('get', self.API_BASE_URL + '/user',
headers=self.__get_header_with_auth()))
@authentication_required
def get_user_favorites(self):
"""
Retrieves the list of tv series the current user has flagged as favorite.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('get', self.API_BASE_URL + '/user/favorites',
headers=self.__get_header_with_auth()))
@authentication_required
def delete_user_favorite(self, series_id):
"""
Deletes the series of the provided id from the favorites list of the current user.
:param series_id: The TheTVDB id of the series.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('delete',
self.API_BASE_URL + '/user/favorites/%d' % series_id,
headers=self.__get_header_with_auth()))
@authentication_required
def add_user_favorite(self, series_id):
"""
Added the series related to the series id provided to the list of favorites of the current user.
:param series_id: The TheTVDB id of the series.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('put',
self.API_BASE_URL + '/user/favorites/%d' % series_id,
headers=self.__get_header_with_auth()))
@authentication_required
def __get_user_ratings(self):
"""
Returns a list of the ratings provided by the current user.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('get', self.API_BASE_URL + '/user/ratings',
headers=self.__get_header_with_auth()))
@authentication_required
def get_user_ratings(self, item_type=None):
"""
Returns a list of the ratings for the type of item provided, for the current user.
:param item_type: One of: series, episode or banner.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
if item_type:
query_string = 'itemType=%s' % item_type
return self.parse_raw_response(
requests_util.run_request('get', self.API_BASE_URL + '/user/ratings/qeury?%s' % query_string,
headers=self.__get_header_with_auth()))
else:
return self.__get_user_ratings()
@authentication_required
def add_user_rating(self, item_type, item_id, item_rating):
"""
Adds the rating for the item indicated for the current user.
:param item_type: One of: series, episode, banner.
:param item_id: The TheTVDB id of the item.
:param item_rating: The rating from 0 to 10.
:return:
"""
raw_response = requests_util.run_request('put',
self.API_BASE_URL + '/user/ratings/%s/%d/%d' %
(item_type, item_id, item_rating),
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def delete_user_rating(self, item_type, item_id):
"""
Deletes from the list of rating of the current user, the rating provided for the specified element type.
:param item_type: One of: series, episode, banner.
:param item_id: The TheTVDB Id of the item.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('delete',
self.API_BASE_URL + '/user/ratings/%s/%d' %
(item_type, item_id), headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_episode(self, episode_id):
"""
Returns the full information of the episode belonging to the Id provided.
:param episode_id: The TheTVDB id of the episode.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/episodes/%d' % episode_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_languages(self):
"""
Returns a list of all language options available in TheTVDB.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/languages',
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_language(self, language_id):
"""
Retrieves information about the language of the given id.
:param language_id: The TheTVDB Id of the language.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/languages/%d' % language_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
|
thilux/tvdb_client | tvdb_client/clients/ApiV2Client.py | ApiV2Client.get_series_episodes | python | def get_series_episodes(self, series_id, page=1):
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/episodes?page=%d' %
(series_id, page), headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response) | Retrieves all episodes for a particular series given its TheTVDB id. It retrieves a maximum of 100 results per
page.
:param series_id: The TheTVDB id of the series.
:param page: The page number. If none is provided, 1 is used by default.
:return: a python dictionary with either the result of the search or an error from TheTVDB. | train | https://github.com/thilux/tvdb_client/blob/2d5106f260367c0abe1284683697874df6343f78/tvdb_client/clients/ApiV2Client.py#L151-L164 | [
"def run_request(request_type, url, retries=5, data=None, headers=None):\n func = __request_factory(request_type)\n\n for attempt in range(retries+1):\n try:\n response = func(url, data=data, headers=headers)\n return response\n except RequestException:\n warnings.warn('Got error on request for attemp %d - %s' %\n (attempt, 'retry is possible' if attempt < retries else 'no retry'))\n return None\n",
"def query_param_string_from_option_args(a2q_dict, args_dict):\n \"\"\"\n From a dictionary of arguments to query string parameters, loops through ad arguments list and makes a query string.\n :param a2q_dict: a dictionary containing argument_name > query string parameter name.\n :param args_dict: a dictionary containing the argument_name > argument_value\n :return: a query string.\n \"\"\"\n\n name_value_pairs = dict()\n for ak in a2q_dict.keys():\n value = args_dict[ak]\n if value != None:\n name_value_pairs[a2q_dict[ak]] = str(value)\n\n return urllib.urlencode(name_value_pairs)\n",
"def __get_header_with_auth(self):\n \"\"\"\n This private method returns the HTTP heder filled with the Authorization information with the user token.\n The token validity is monitored whenever this function is called, so according to the swagger page of TheTVDB\n (https://api.thetvdb.com/swagger) the tokens are valid for 24 hours, therefore if a token is generated for over\n 23 hours already, this function will also perform a token refresh using TheTVDB refresh_token API. If over 24\n hours have passed since the token generation, a login is performed to generate a new one, instead.\n\n :return: A python dictionary representing the HTTP header to be used in TheTVDB API calls.\n \"\"\"\n auth_header = self.__get_header()\n auth_header['Authorization'] = 'Bearer %s' % self.__token\n token_renew_time = self.__auth_time + timedelta(seconds=self.TOKEN_DURATION_SECONDS)\n\n if datetime.now() > token_renew_time:\n token_max_time = self.__auth_time + timedelta(seconds=self.TOKEN_MAX_DURATION)\n if datetime.now() < token_max_time:\n self.__refresh_token()\n else:\n self.login()\n\n auth_header['Authorization'] = 'Bearer %s' % self.__token\n\n return auth_header\n",
"def parse_raw_response(self, raw_response):\n\n if raw_response.status_code == 200:\n return json.loads(make_str_content(raw_response.content))\n else:\n return self.__handle_error(raw_response)\n"
] | class ApiV2Client(BaseClient):
"""
This is the python library implementation of the TheTVDB API V2. Details of the APIs is documented in the swagger
page maintained by TheTVDB that can be found on this address: https://api.thetvdb.com/swagger
The only APIs not represented by methods in this library are the ones that only returns parameters information of
the actual API.
"""
API_BASE_URL = 'https://api.thetvdb.com'
TOKEN_DURATION_SECONDS = 23 * 3600 # 23 Hours
TOKEN_MAX_DURATION = 24 * 3600 # 24 Hours
def __init__(self, username, api_key, account_identifier, language=None):
self.username = username
self.api_key = api_key
self.account_identifier = account_identifier
self.is_authenticated = False
self.__token = None
self.__auth_time = 0
self.language = language
def __get_header(self):
header = dict()
header['Content-Type'] = 'application/json'
header['User-Agent'] = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:47.0) Gecko/20100101 Firefox/47.0'
if self.language:
header['Accept-Language'] = self.language
return header
def __refresh_token(self):
headers = self.__get_header()
headers['Authorization'] = 'Bearer %s' % self.__token
resp = requests_util.run_request('get', self.API_BASE_URL + '/refresh_token', headers=headers)
if resp.status_code == 200:
token_resp = self.parse_raw_response(resp)
self.__token = token_resp['token']
self.__auth_time = datetime.now()
def __get_header_with_auth(self):
"""
This private method returns the HTTP heder filled with the Authorization information with the user token.
The token validity is monitored whenever this function is called, so according to the swagger page of TheTVDB
(https://api.thetvdb.com/swagger) the tokens are valid for 24 hours, therefore if a token is generated for over
23 hours already, this function will also perform a token refresh using TheTVDB refresh_token API. If over 24
hours have passed since the token generation, a login is performed to generate a new one, instead.
:return: A python dictionary representing the HTTP header to be used in TheTVDB API calls.
"""
auth_header = self.__get_header()
auth_header['Authorization'] = 'Bearer %s' % self.__token
token_renew_time = self.__auth_time + timedelta(seconds=self.TOKEN_DURATION_SECONDS)
if datetime.now() > token_renew_time:
token_max_time = self.__auth_time + timedelta(seconds=self.TOKEN_MAX_DURATION)
if datetime.now() < token_max_time:
self.__refresh_token()
else:
self.login()
auth_header['Authorization'] = 'Bearer %s' % self.__token
return auth_header
def login(self):
"""
This method performs the login on TheTVDB given the api key, user name and account identifier.
:return: None
"""
auth_data = dict()
auth_data['apikey'] = self.api_key
auth_data['username'] = self.username
auth_data['userkey'] = self.account_identifier
auth_resp = requests_util.run_request('post', self.API_BASE_URL + '/login', data=json.dumps(auth_data),
headers=self.__get_header())
if auth_resp.status_code == 200:
auth_resp_data = self.parse_raw_response(auth_resp)
self.__token = auth_resp_data['token']
self.__auth_time = datetime.now()
self.is_authenticated = True
else:
raise AuthenticationFailedException('Authentication failed!')
@authentication_required
def search_series(self, name=None, imdb_id=None, zap2it_id=None):
"""
Searchs for a series in TheTVDB by either its name, imdb_id or zap2it_id.
:param name: the name of the series to look for
:param imdb_id: the IMDB id of the series to look for
:param zap2it_id: the zap2it id of the series to look for.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
arguments = locals()
optional_parameters = {'name': 'name', 'imdb_id': 'imdbId', 'zap2it_id': 'zap2itId'}
query_string = utils.query_param_string_from_option_args(optional_parameters, arguments)
raw_response = requests_util.run_request('get', '%s%s?%s' % (self.API_BASE_URL, '/search/series',
query_string),
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series(self, series_id):
"""
Retrieves the information of a series from TheTVDB given the series ID.
:param series_id: the id of the series on TheTVDB.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d' % series_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series_actors(self, series_id):
"""
Retrieves the information on the actors of a particular series, given its TheTVDB id.
:param series_id: the TheTVDB id of the series
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/actors' % series_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
@authentication_required
def get_series_episodes(self, series_id, episode_number=None, aired_season=None, aired_episode=None,
dvd_season=None, dvd_episode=None, imdb_id=None, page=1):
"""
Retrieves all episodes for a particular series given its TheTVDB and filtered by additional optional details.
:param series_id: The TheTVDB id of the series.
:param episode_number: The optional absolute episode number.
:param aired_season: The optional aired season number.
:param aired_episode: The optional aired episode number.
:param dvd_season: The optional DVD season number.
:param dvd_episode: The optional DVD episode number.
:param imdb_id: The optional IMDB Id of the series.
:param page: The page number. If none is provided, 1 is used by default.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
arguments = locals()
optional_parameters = {'episode_number': 'absoluteNumber', 'aired_season': 'airedSeason',
'aired_episode': 'airedEpisode', 'dvd_season': 'dvdSeason', 'dvd_episode': 'dvdEpisode',
'imdb_id': 'imdbId', 'page': 'page'}
query_string = utils.query_param_string_from_option_args(optional_parameters, arguments)
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/episodes/query?%s' %
(series_id, query_string), headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series_episodes_summary(self, series_id):
"""
Retrieves the summary of the episodes and seasons of a series given its TheTVDB id.
:param series_id: The TheTVDB id of the series.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/episodes/summary' % series_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def __get_series_images(self, series_id):
"""
Retrieves the url to images (posters, fanart) for the series, seasons and episodes of a series given its
TheTVDB id.
:param series_id: The TheTVDB id of the series.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/images' % series_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series_images(self, series_id, image_type=None, resolution=None, sub_key=None):
"""
Retrieves the url to images (posters, fanart) for the series, seasons and episodes of a series given its
TheTVDB id and filtered by additional parameters.
:param series_id: The TheTVDB id of the series.
:param image_type: The optional type of image: posters, fanart, thumbnail, etc.
:param resolution: The optional resolution: i.e. 1280x1024
:param sub_key: The optional subkey: graphical, text.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
arguments = locals()
optional_parameters = {'image_type': 'keyType', 'resolution': 'resolution', 'sub_key': 'subKey'}
query_string = utils.query_param_string_from_option_args(optional_parameters, arguments)
if len(query_string):
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/images/query?%s' %
(series_id, query_string), headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
else:
return self.__get_series_images(series_id)
@authentication_required
def get_updated(self, from_time, to_time=None):
"""
Retrives a list of series that have changed on TheTVDB since a provided from time parameter and optionally to an
specified to time.
:param from_time: An epoch representation of the date from which to restrict the query to.
:param to_time: An optional epcoh representation of the date to which to restrict the query to.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
arguments = locals()
optional_parameters = {'to_time': 'toTime'}
query_string = 'fromTime=%s&%s' % (from_time,
utils.query_param_string_from_option_args(optional_parameters, arguments))
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/uodated/query?%s' % query_string,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_user(self):
"""
Retrieves information about the user currently using the api.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('get', self.API_BASE_URL + '/user',
headers=self.__get_header_with_auth()))
@authentication_required
def get_user_favorites(self):
"""
Retrieves the list of tv series the current user has flagged as favorite.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('get', self.API_BASE_URL + '/user/favorites',
headers=self.__get_header_with_auth()))
@authentication_required
def delete_user_favorite(self, series_id):
"""
Deletes the series of the provided id from the favorites list of the current user.
:param series_id: The TheTVDB id of the series.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('delete',
self.API_BASE_URL + '/user/favorites/%d' % series_id,
headers=self.__get_header_with_auth()))
@authentication_required
def add_user_favorite(self, series_id):
"""
Added the series related to the series id provided to the list of favorites of the current user.
:param series_id: The TheTVDB id of the series.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('put',
self.API_BASE_URL + '/user/favorites/%d' % series_id,
headers=self.__get_header_with_auth()))
@authentication_required
def __get_user_ratings(self):
"""
Returns a list of the ratings provided by the current user.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('get', self.API_BASE_URL + '/user/ratings',
headers=self.__get_header_with_auth()))
@authentication_required
def get_user_ratings(self, item_type=None):
"""
Returns a list of the ratings for the type of item provided, for the current user.
:param item_type: One of: series, episode or banner.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
if item_type:
query_string = 'itemType=%s' % item_type
return self.parse_raw_response(
requests_util.run_request('get', self.API_BASE_URL + '/user/ratings/qeury?%s' % query_string,
headers=self.__get_header_with_auth()))
else:
return self.__get_user_ratings()
@authentication_required
def add_user_rating(self, item_type, item_id, item_rating):
"""
Adds the rating for the item indicated for the current user.
:param item_type: One of: series, episode, banner.
:param item_id: The TheTVDB id of the item.
:param item_rating: The rating from 0 to 10.
:return:
"""
raw_response = requests_util.run_request('put',
self.API_BASE_URL + '/user/ratings/%s/%d/%d' %
(item_type, item_id, item_rating),
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def delete_user_rating(self, item_type, item_id):
"""
Deletes from the list of rating of the current user, the rating provided for the specified element type.
:param item_type: One of: series, episode, banner.
:param item_id: The TheTVDB Id of the item.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('delete',
self.API_BASE_URL + '/user/ratings/%s/%d' %
(item_type, item_id), headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_episode(self, episode_id):
"""
Returns the full information of the episode belonging to the Id provided.
:param episode_id: The TheTVDB id of the episode.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/episodes/%d' % episode_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_languages(self):
"""
Returns a list of all language options available in TheTVDB.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/languages',
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_language(self, language_id):
"""
Retrieves information about the language of the given id.
:param language_id: The TheTVDB Id of the language.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/languages/%d' % language_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
|
thilux/tvdb_client | tvdb_client/clients/ApiV2Client.py | ApiV2Client.get_series_episodes | python | def get_series_episodes(self, series_id, episode_number=None, aired_season=None, aired_episode=None,
dvd_season=None, dvd_episode=None, imdb_id=None, page=1):
arguments = locals()
optional_parameters = {'episode_number': 'absoluteNumber', 'aired_season': 'airedSeason',
'aired_episode': 'airedEpisode', 'dvd_season': 'dvdSeason', 'dvd_episode': 'dvdEpisode',
'imdb_id': 'imdbId', 'page': 'page'}
query_string = utils.query_param_string_from_option_args(optional_parameters, arguments)
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/episodes/query?%s' %
(series_id, query_string), headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response) | Retrieves all episodes for a particular series given its TheTVDB and filtered by additional optional details.
:param series_id: The TheTVDB id of the series.
:param episode_number: The optional absolute episode number.
:param aired_season: The optional aired season number.
:param aired_episode: The optional aired episode number.
:param dvd_season: The optional DVD season number.
:param dvd_episode: The optional DVD episode number.
:param imdb_id: The optional IMDB Id of the series.
:param page: The page number. If none is provided, 1 is used by default.
:return: a python dictionary with either the result of the search or an error from TheTVDB. | train | https://github.com/thilux/tvdb_client/blob/2d5106f260367c0abe1284683697874df6343f78/tvdb_client/clients/ApiV2Client.py#L167-L194 | [
"def run_request(request_type, url, retries=5, data=None, headers=None):\n func = __request_factory(request_type)\n\n for attempt in range(retries+1):\n try:\n response = func(url, data=data, headers=headers)\n return response\n except RequestException:\n warnings.warn('Got error on request for attemp %d - %s' %\n (attempt, 'retry is possible' if attempt < retries else 'no retry'))\n return None\n",
"def query_param_string_from_option_args(a2q_dict, args_dict):\n \"\"\"\n From a dictionary of arguments to query string parameters, loops through ad arguments list and makes a query string.\n :param a2q_dict: a dictionary containing argument_name > query string parameter name.\n :param args_dict: a dictionary containing the argument_name > argument_value\n :return: a query string.\n \"\"\"\n\n name_value_pairs = dict()\n for ak in a2q_dict.keys():\n value = args_dict[ak]\n if value != None:\n name_value_pairs[a2q_dict[ak]] = str(value)\n\n return urllib.urlencode(name_value_pairs)\n",
"def __get_header_with_auth(self):\n \"\"\"\n This private method returns the HTTP heder filled with the Authorization information with the user token.\n The token validity is monitored whenever this function is called, so according to the swagger page of TheTVDB\n (https://api.thetvdb.com/swagger) the tokens are valid for 24 hours, therefore if a token is generated for over\n 23 hours already, this function will also perform a token refresh using TheTVDB refresh_token API. If over 24\n hours have passed since the token generation, a login is performed to generate a new one, instead.\n\n :return: A python dictionary representing the HTTP header to be used in TheTVDB API calls.\n \"\"\"\n auth_header = self.__get_header()\n auth_header['Authorization'] = 'Bearer %s' % self.__token\n token_renew_time = self.__auth_time + timedelta(seconds=self.TOKEN_DURATION_SECONDS)\n\n if datetime.now() > token_renew_time:\n token_max_time = self.__auth_time + timedelta(seconds=self.TOKEN_MAX_DURATION)\n if datetime.now() < token_max_time:\n self.__refresh_token()\n else:\n self.login()\n\n auth_header['Authorization'] = 'Bearer %s' % self.__token\n\n return auth_header\n",
"def parse_raw_response(self, raw_response):\n\n if raw_response.status_code == 200:\n return json.loads(make_str_content(raw_response.content))\n else:\n return self.__handle_error(raw_response)\n"
] | class ApiV2Client(BaseClient):
"""
This is the python library implementation of the TheTVDB API V2. Details of the APIs is documented in the swagger
page maintained by TheTVDB that can be found on this address: https://api.thetvdb.com/swagger
The only APIs not represented by methods in this library are the ones that only returns parameters information of
the actual API.
"""
API_BASE_URL = 'https://api.thetvdb.com'
TOKEN_DURATION_SECONDS = 23 * 3600 # 23 Hours
TOKEN_MAX_DURATION = 24 * 3600 # 24 Hours
def __init__(self, username, api_key, account_identifier, language=None):
self.username = username
self.api_key = api_key
self.account_identifier = account_identifier
self.is_authenticated = False
self.__token = None
self.__auth_time = 0
self.language = language
def __get_header(self):
header = dict()
header['Content-Type'] = 'application/json'
header['User-Agent'] = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:47.0) Gecko/20100101 Firefox/47.0'
if self.language:
header['Accept-Language'] = self.language
return header
def __refresh_token(self):
headers = self.__get_header()
headers['Authorization'] = 'Bearer %s' % self.__token
resp = requests_util.run_request('get', self.API_BASE_URL + '/refresh_token', headers=headers)
if resp.status_code == 200:
token_resp = self.parse_raw_response(resp)
self.__token = token_resp['token']
self.__auth_time = datetime.now()
def __get_header_with_auth(self):
"""
This private method returns the HTTP heder filled with the Authorization information with the user token.
The token validity is monitored whenever this function is called, so according to the swagger page of TheTVDB
(https://api.thetvdb.com/swagger) the tokens are valid for 24 hours, therefore if a token is generated for over
23 hours already, this function will also perform a token refresh using TheTVDB refresh_token API. If over 24
hours have passed since the token generation, a login is performed to generate a new one, instead.
:return: A python dictionary representing the HTTP header to be used in TheTVDB API calls.
"""
auth_header = self.__get_header()
auth_header['Authorization'] = 'Bearer %s' % self.__token
token_renew_time = self.__auth_time + timedelta(seconds=self.TOKEN_DURATION_SECONDS)
if datetime.now() > token_renew_time:
token_max_time = self.__auth_time + timedelta(seconds=self.TOKEN_MAX_DURATION)
if datetime.now() < token_max_time:
self.__refresh_token()
else:
self.login()
auth_header['Authorization'] = 'Bearer %s' % self.__token
return auth_header
def login(self):
"""
This method performs the login on TheTVDB given the api key, user name and account identifier.
:return: None
"""
auth_data = dict()
auth_data['apikey'] = self.api_key
auth_data['username'] = self.username
auth_data['userkey'] = self.account_identifier
auth_resp = requests_util.run_request('post', self.API_BASE_URL + '/login', data=json.dumps(auth_data),
headers=self.__get_header())
if auth_resp.status_code == 200:
auth_resp_data = self.parse_raw_response(auth_resp)
self.__token = auth_resp_data['token']
self.__auth_time = datetime.now()
self.is_authenticated = True
else:
raise AuthenticationFailedException('Authentication failed!')
@authentication_required
def search_series(self, name=None, imdb_id=None, zap2it_id=None):
"""
Searchs for a series in TheTVDB by either its name, imdb_id or zap2it_id.
:param name: the name of the series to look for
:param imdb_id: the IMDB id of the series to look for
:param zap2it_id: the zap2it id of the series to look for.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
arguments = locals()
optional_parameters = {'name': 'name', 'imdb_id': 'imdbId', 'zap2it_id': 'zap2itId'}
query_string = utils.query_param_string_from_option_args(optional_parameters, arguments)
raw_response = requests_util.run_request('get', '%s%s?%s' % (self.API_BASE_URL, '/search/series',
query_string),
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series(self, series_id):
"""
Retrieves the information of a series from TheTVDB given the series ID.
:param series_id: the id of the series on TheTVDB.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d' % series_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series_actors(self, series_id):
"""
Retrieves the information on the actors of a particular series, given its TheTVDB id.
:param series_id: the TheTVDB id of the series
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/actors' % series_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series_episodes(self, series_id, page=1):
"""
Retrieves all episodes for a particular series given its TheTVDB id. It retrieves a maximum of 100 results per
page.
:param series_id: The TheTVDB id of the series.
:param page: The page number. If none is provided, 1 is used by default.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/episodes?page=%d' %
(series_id, page), headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
@authentication_required
def get_series_episodes_summary(self, series_id):
"""
Retrieves the summary of the episodes and seasons of a series given its TheTVDB id.
:param series_id: The TheTVDB id of the series.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/episodes/summary' % series_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def __get_series_images(self, series_id):
"""
Retrieves the url to images (posters, fanart) for the series, seasons and episodes of a series given its
TheTVDB id.
:param series_id: The TheTVDB id of the series.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/images' % series_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series_images(self, series_id, image_type=None, resolution=None, sub_key=None):
"""
Retrieves the url to images (posters, fanart) for the series, seasons and episodes of a series given its
TheTVDB id and filtered by additional parameters.
:param series_id: The TheTVDB id of the series.
:param image_type: The optional type of image: posters, fanart, thumbnail, etc.
:param resolution: The optional resolution: i.e. 1280x1024
:param sub_key: The optional subkey: graphical, text.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
arguments = locals()
optional_parameters = {'image_type': 'keyType', 'resolution': 'resolution', 'sub_key': 'subKey'}
query_string = utils.query_param_string_from_option_args(optional_parameters, arguments)
if len(query_string):
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/images/query?%s' %
(series_id, query_string), headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
else:
return self.__get_series_images(series_id)
@authentication_required
def get_updated(self, from_time, to_time=None):
"""
Retrives a list of series that have changed on TheTVDB since a provided from time parameter and optionally to an
specified to time.
:param from_time: An epoch representation of the date from which to restrict the query to.
:param to_time: An optional epcoh representation of the date to which to restrict the query to.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
arguments = locals()
optional_parameters = {'to_time': 'toTime'}
query_string = 'fromTime=%s&%s' % (from_time,
utils.query_param_string_from_option_args(optional_parameters, arguments))
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/uodated/query?%s' % query_string,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_user(self):
"""
Retrieves information about the user currently using the api.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('get', self.API_BASE_URL + '/user',
headers=self.__get_header_with_auth()))
@authentication_required
def get_user_favorites(self):
"""
Retrieves the list of tv series the current user has flagged as favorite.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('get', self.API_BASE_URL + '/user/favorites',
headers=self.__get_header_with_auth()))
@authentication_required
def delete_user_favorite(self, series_id):
"""
Deletes the series of the provided id from the favorites list of the current user.
:param series_id: The TheTVDB id of the series.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('delete',
self.API_BASE_URL + '/user/favorites/%d' % series_id,
headers=self.__get_header_with_auth()))
@authentication_required
def add_user_favorite(self, series_id):
"""
Added the series related to the series id provided to the list of favorites of the current user.
:param series_id: The TheTVDB id of the series.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('put',
self.API_BASE_URL + '/user/favorites/%d' % series_id,
headers=self.__get_header_with_auth()))
@authentication_required
def __get_user_ratings(self):
"""
Returns a list of the ratings provided by the current user.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('get', self.API_BASE_URL + '/user/ratings',
headers=self.__get_header_with_auth()))
@authentication_required
def get_user_ratings(self, item_type=None):
"""
Returns a list of the ratings for the type of item provided, for the current user.
:param item_type: One of: series, episode or banner.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
if item_type:
query_string = 'itemType=%s' % item_type
return self.parse_raw_response(
requests_util.run_request('get', self.API_BASE_URL + '/user/ratings/qeury?%s' % query_string,
headers=self.__get_header_with_auth()))
else:
return self.__get_user_ratings()
@authentication_required
def add_user_rating(self, item_type, item_id, item_rating):
"""
Adds the rating for the item indicated for the current user.
:param item_type: One of: series, episode, banner.
:param item_id: The TheTVDB id of the item.
:param item_rating: The rating from 0 to 10.
:return:
"""
raw_response = requests_util.run_request('put',
self.API_BASE_URL + '/user/ratings/%s/%d/%d' %
(item_type, item_id, item_rating),
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def delete_user_rating(self, item_type, item_id):
"""
Deletes from the list of rating of the current user, the rating provided for the specified element type.
:param item_type: One of: series, episode, banner.
:param item_id: The TheTVDB Id of the item.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('delete',
self.API_BASE_URL + '/user/ratings/%s/%d' %
(item_type, item_id), headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_episode(self, episode_id):
"""
Returns the full information of the episode belonging to the Id provided.
:param episode_id: The TheTVDB id of the episode.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/episodes/%d' % episode_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_languages(self):
"""
Returns a list of all language options available in TheTVDB.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/languages',
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_language(self, language_id):
"""
Retrieves information about the language of the given id.
:param language_id: The TheTVDB Id of the language.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/languages/%d' % language_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
|
thilux/tvdb_client | tvdb_client/clients/ApiV2Client.py | ApiV2Client.get_series_images | python | def get_series_images(self, series_id, image_type=None, resolution=None, sub_key=None):
arguments = locals()
optional_parameters = {'image_type': 'keyType', 'resolution': 'resolution', 'sub_key': 'subKey'}
query_string = utils.query_param_string_from_option_args(optional_parameters, arguments)
if len(query_string):
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/images/query?%s' %
(series_id, query_string), headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
else:
return self.__get_series_images(series_id) | Retrieves the url to images (posters, fanart) for the series, seasons and episodes of a series given its
TheTVDB id and filtered by additional parameters.
:param series_id: The TheTVDB id of the series.
:param image_type: The optional type of image: posters, fanart, thumbnail, etc.
:param resolution: The optional resolution: i.e. 1280x1024
:param sub_key: The optional subkey: graphical, text.
:return: a python dictionary with either the result of the search or an error from TheTVDB. | train | https://github.com/thilux/tvdb_client/blob/2d5106f260367c0abe1284683697874df6343f78/tvdb_client/clients/ApiV2Client.py#L226-L249 | [
"def run_request(request_type, url, retries=5, data=None, headers=None):\n func = __request_factory(request_type)\n\n for attempt in range(retries+1):\n try:\n response = func(url, data=data, headers=headers)\n return response\n except RequestException:\n warnings.warn('Got error on request for attemp %d - %s' %\n (attempt, 'retry is possible' if attempt < retries else 'no retry'))\n return None\n",
"def query_param_string_from_option_args(a2q_dict, args_dict):\n \"\"\"\n From a dictionary of arguments to query string parameters, loops through ad arguments list and makes a query string.\n :param a2q_dict: a dictionary containing argument_name > query string parameter name.\n :param args_dict: a dictionary containing the argument_name > argument_value\n :return: a query string.\n \"\"\"\n\n name_value_pairs = dict()\n for ak in a2q_dict.keys():\n value = args_dict[ak]\n if value != None:\n name_value_pairs[a2q_dict[ak]] = str(value)\n\n return urllib.urlencode(name_value_pairs)\n",
"def __get_header_with_auth(self):\n \"\"\"\n This private method returns the HTTP heder filled with the Authorization information with the user token.\n The token validity is monitored whenever this function is called, so according to the swagger page of TheTVDB\n (https://api.thetvdb.com/swagger) the tokens are valid for 24 hours, therefore if a token is generated for over\n 23 hours already, this function will also perform a token refresh using TheTVDB refresh_token API. If over 24\n hours have passed since the token generation, a login is performed to generate a new one, instead.\n\n :return: A python dictionary representing the HTTP header to be used in TheTVDB API calls.\n \"\"\"\n auth_header = self.__get_header()\n auth_header['Authorization'] = 'Bearer %s' % self.__token\n token_renew_time = self.__auth_time + timedelta(seconds=self.TOKEN_DURATION_SECONDS)\n\n if datetime.now() > token_renew_time:\n token_max_time = self.__auth_time + timedelta(seconds=self.TOKEN_MAX_DURATION)\n if datetime.now() < token_max_time:\n self.__refresh_token()\n else:\n self.login()\n\n auth_header['Authorization'] = 'Bearer %s' % self.__token\n\n return auth_header\n",
"def parse_raw_response(self, raw_response):\n\n if raw_response.status_code == 200:\n return json.loads(make_str_content(raw_response.content))\n else:\n return self.__handle_error(raw_response)\n",
"def inner(instance, *args, **kwargs):\n if not instance.is_authenticated:\n raise UserNotLoggedInException('Authentication is required!')\n else:\n return func(instance, *args, **kwargs)\n"
] | class ApiV2Client(BaseClient):
"""
This is the python library implementation of the TheTVDB API V2. Details of the APIs is documented in the swagger
page maintained by TheTVDB that can be found on this address: https://api.thetvdb.com/swagger
The only APIs not represented by methods in this library are the ones that only returns parameters information of
the actual API.
"""
API_BASE_URL = 'https://api.thetvdb.com'
TOKEN_DURATION_SECONDS = 23 * 3600 # 23 Hours
TOKEN_MAX_DURATION = 24 * 3600 # 24 Hours
def __init__(self, username, api_key, account_identifier, language=None):
self.username = username
self.api_key = api_key
self.account_identifier = account_identifier
self.is_authenticated = False
self.__token = None
self.__auth_time = 0
self.language = language
def __get_header(self):
header = dict()
header['Content-Type'] = 'application/json'
header['User-Agent'] = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:47.0) Gecko/20100101 Firefox/47.0'
if self.language:
header['Accept-Language'] = self.language
return header
def __refresh_token(self):
headers = self.__get_header()
headers['Authorization'] = 'Bearer %s' % self.__token
resp = requests_util.run_request('get', self.API_BASE_URL + '/refresh_token', headers=headers)
if resp.status_code == 200:
token_resp = self.parse_raw_response(resp)
self.__token = token_resp['token']
self.__auth_time = datetime.now()
def __get_header_with_auth(self):
"""
This private method returns the HTTP heder filled with the Authorization information with the user token.
The token validity is monitored whenever this function is called, so according to the swagger page of TheTVDB
(https://api.thetvdb.com/swagger) the tokens are valid for 24 hours, therefore if a token is generated for over
23 hours already, this function will also perform a token refresh using TheTVDB refresh_token API. If over 24
hours have passed since the token generation, a login is performed to generate a new one, instead.
:return: A python dictionary representing the HTTP header to be used in TheTVDB API calls.
"""
auth_header = self.__get_header()
auth_header['Authorization'] = 'Bearer %s' % self.__token
token_renew_time = self.__auth_time + timedelta(seconds=self.TOKEN_DURATION_SECONDS)
if datetime.now() > token_renew_time:
token_max_time = self.__auth_time + timedelta(seconds=self.TOKEN_MAX_DURATION)
if datetime.now() < token_max_time:
self.__refresh_token()
else:
self.login()
auth_header['Authorization'] = 'Bearer %s' % self.__token
return auth_header
def login(self):
"""
This method performs the login on TheTVDB given the api key, user name and account identifier.
:return: None
"""
auth_data = dict()
auth_data['apikey'] = self.api_key
auth_data['username'] = self.username
auth_data['userkey'] = self.account_identifier
auth_resp = requests_util.run_request('post', self.API_BASE_URL + '/login', data=json.dumps(auth_data),
headers=self.__get_header())
if auth_resp.status_code == 200:
auth_resp_data = self.parse_raw_response(auth_resp)
self.__token = auth_resp_data['token']
self.__auth_time = datetime.now()
self.is_authenticated = True
else:
raise AuthenticationFailedException('Authentication failed!')
@authentication_required
def search_series(self, name=None, imdb_id=None, zap2it_id=None):
"""
Searchs for a series in TheTVDB by either its name, imdb_id or zap2it_id.
:param name: the name of the series to look for
:param imdb_id: the IMDB id of the series to look for
:param zap2it_id: the zap2it id of the series to look for.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
arguments = locals()
optional_parameters = {'name': 'name', 'imdb_id': 'imdbId', 'zap2it_id': 'zap2itId'}
query_string = utils.query_param_string_from_option_args(optional_parameters, arguments)
raw_response = requests_util.run_request('get', '%s%s?%s' % (self.API_BASE_URL, '/search/series',
query_string),
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series(self, series_id):
"""
Retrieves the information of a series from TheTVDB given the series ID.
:param series_id: the id of the series on TheTVDB.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d' % series_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series_actors(self, series_id):
"""
Retrieves the information on the actors of a particular series, given its TheTVDB id.
:param series_id: the TheTVDB id of the series
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/actors' % series_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series_episodes(self, series_id, page=1):
"""
Retrieves all episodes for a particular series given its TheTVDB id. It retrieves a maximum of 100 results per
page.
:param series_id: The TheTVDB id of the series.
:param page: The page number. If none is provided, 1 is used by default.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/episodes?page=%d' %
(series_id, page), headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series_episodes(self, series_id, episode_number=None, aired_season=None, aired_episode=None,
dvd_season=None, dvd_episode=None, imdb_id=None, page=1):
"""
Retrieves all episodes for a particular series given its TheTVDB and filtered by additional optional details.
:param series_id: The TheTVDB id of the series.
:param episode_number: The optional absolute episode number.
:param aired_season: The optional aired season number.
:param aired_episode: The optional aired episode number.
:param dvd_season: The optional DVD season number.
:param dvd_episode: The optional DVD episode number.
:param imdb_id: The optional IMDB Id of the series.
:param page: The page number. If none is provided, 1 is used by default.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
arguments = locals()
optional_parameters = {'episode_number': 'absoluteNumber', 'aired_season': 'airedSeason',
'aired_episode': 'airedEpisode', 'dvd_season': 'dvdSeason', 'dvd_episode': 'dvdEpisode',
'imdb_id': 'imdbId', 'page': 'page'}
query_string = utils.query_param_string_from_option_args(optional_parameters, arguments)
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/episodes/query?%s' %
(series_id, query_string), headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series_episodes_summary(self, series_id):
"""
Retrieves the summary of the episodes and seasons of a series given its TheTVDB id.
:param series_id: The TheTVDB id of the series.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/episodes/summary' % series_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def __get_series_images(self, series_id):
"""
Retrieves the url to images (posters, fanart) for the series, seasons and episodes of a series given its
TheTVDB id.
:param series_id: The TheTVDB id of the series.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/images' % series_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
@authentication_required
def get_updated(self, from_time, to_time=None):
"""
Retrives a list of series that have changed on TheTVDB since a provided from time parameter and optionally to an
specified to time.
:param from_time: An epoch representation of the date from which to restrict the query to.
:param to_time: An optional epcoh representation of the date to which to restrict the query to.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
arguments = locals()
optional_parameters = {'to_time': 'toTime'}
query_string = 'fromTime=%s&%s' % (from_time,
utils.query_param_string_from_option_args(optional_parameters, arguments))
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/uodated/query?%s' % query_string,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_user(self):
"""
Retrieves information about the user currently using the api.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('get', self.API_BASE_URL + '/user',
headers=self.__get_header_with_auth()))
@authentication_required
def get_user_favorites(self):
"""
Retrieves the list of tv series the current user has flagged as favorite.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('get', self.API_BASE_URL + '/user/favorites',
headers=self.__get_header_with_auth()))
@authentication_required
def delete_user_favorite(self, series_id):
"""
Deletes the series of the provided id from the favorites list of the current user.
:param series_id: The TheTVDB id of the series.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('delete',
self.API_BASE_URL + '/user/favorites/%d' % series_id,
headers=self.__get_header_with_auth()))
@authentication_required
def add_user_favorite(self, series_id):
"""
Added the series related to the series id provided to the list of favorites of the current user.
:param series_id: The TheTVDB id of the series.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('put',
self.API_BASE_URL + '/user/favorites/%d' % series_id,
headers=self.__get_header_with_auth()))
@authentication_required
def __get_user_ratings(self):
"""
Returns a list of the ratings provided by the current user.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('get', self.API_BASE_URL + '/user/ratings',
headers=self.__get_header_with_auth()))
@authentication_required
def get_user_ratings(self, item_type=None):
"""
Returns a list of the ratings for the type of item provided, for the current user.
:param item_type: One of: series, episode or banner.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
if item_type:
query_string = 'itemType=%s' % item_type
return self.parse_raw_response(
requests_util.run_request('get', self.API_BASE_URL + '/user/ratings/qeury?%s' % query_string,
headers=self.__get_header_with_auth()))
else:
return self.__get_user_ratings()
@authentication_required
def add_user_rating(self, item_type, item_id, item_rating):
"""
Adds the rating for the item indicated for the current user.
:param item_type: One of: series, episode, banner.
:param item_id: The TheTVDB id of the item.
:param item_rating: The rating from 0 to 10.
:return:
"""
raw_response = requests_util.run_request('put',
self.API_BASE_URL + '/user/ratings/%s/%d/%d' %
(item_type, item_id, item_rating),
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def delete_user_rating(self, item_type, item_id):
"""
Deletes from the list of rating of the current user, the rating provided for the specified element type.
:param item_type: One of: series, episode, banner.
:param item_id: The TheTVDB Id of the item.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('delete',
self.API_BASE_URL + '/user/ratings/%s/%d' %
(item_type, item_id), headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_episode(self, episode_id):
"""
Returns the full information of the episode belonging to the Id provided.
:param episode_id: The TheTVDB id of the episode.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/episodes/%d' % episode_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_languages(self):
"""
Returns a list of all language options available in TheTVDB.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/languages',
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_language(self, language_id):
"""
Retrieves information about the language of the given id.
:param language_id: The TheTVDB Id of the language.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/languages/%d' % language_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
|
thilux/tvdb_client | tvdb_client/clients/ApiV2Client.py | ApiV2Client.get_updated | python | def get_updated(self, from_time, to_time=None):
arguments = locals()
optional_parameters = {'to_time': 'toTime'}
query_string = 'fromTime=%s&%s' % (from_time,
utils.query_param_string_from_option_args(optional_parameters, arguments))
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/uodated/query?%s' % query_string,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response) | Retrives a list of series that have changed on TheTVDB since a provided from time parameter and optionally to an
specified to time.
:param from_time: An epoch representation of the date from which to restrict the query to.
:param to_time: An optional epcoh representation of the date to which to restrict the query to.
:return: a python dictionary with either the result of the search or an error from TheTVDB. | train | https://github.com/thilux/tvdb_client/blob/2d5106f260367c0abe1284683697874df6343f78/tvdb_client/clients/ApiV2Client.py#L252-L271 | [
"def run_request(request_type, url, retries=5, data=None, headers=None):\n func = __request_factory(request_type)\n\n for attempt in range(retries+1):\n try:\n response = func(url, data=data, headers=headers)\n return response\n except RequestException:\n warnings.warn('Got error on request for attemp %d - %s' %\n (attempt, 'retry is possible' if attempt < retries else 'no retry'))\n return None\n",
"def query_param_string_from_option_args(a2q_dict, args_dict):\n \"\"\"\n From a dictionary of arguments to query string parameters, loops through ad arguments list and makes a query string.\n :param a2q_dict: a dictionary containing argument_name > query string parameter name.\n :param args_dict: a dictionary containing the argument_name > argument_value\n :return: a query string.\n \"\"\"\n\n name_value_pairs = dict()\n for ak in a2q_dict.keys():\n value = args_dict[ak]\n if value != None:\n name_value_pairs[a2q_dict[ak]] = str(value)\n\n return urllib.urlencode(name_value_pairs)\n",
"def __get_header_with_auth(self):\n \"\"\"\n This private method returns the HTTP heder filled with the Authorization information with the user token.\n The token validity is monitored whenever this function is called, so according to the swagger page of TheTVDB\n (https://api.thetvdb.com/swagger) the tokens are valid for 24 hours, therefore if a token is generated for over\n 23 hours already, this function will also perform a token refresh using TheTVDB refresh_token API. If over 24\n hours have passed since the token generation, a login is performed to generate a new one, instead.\n\n :return: A python dictionary representing the HTTP header to be used in TheTVDB API calls.\n \"\"\"\n auth_header = self.__get_header()\n auth_header['Authorization'] = 'Bearer %s' % self.__token\n token_renew_time = self.__auth_time + timedelta(seconds=self.TOKEN_DURATION_SECONDS)\n\n if datetime.now() > token_renew_time:\n token_max_time = self.__auth_time + timedelta(seconds=self.TOKEN_MAX_DURATION)\n if datetime.now() < token_max_time:\n self.__refresh_token()\n else:\n self.login()\n\n auth_header['Authorization'] = 'Bearer %s' % self.__token\n\n return auth_header\n",
"def parse_raw_response(self, raw_response):\n\n if raw_response.status_code == 200:\n return json.loads(make_str_content(raw_response.content))\n else:\n return self.__handle_error(raw_response)\n"
] | class ApiV2Client(BaseClient):
"""
This is the python library implementation of the TheTVDB API V2. Details of the APIs is documented in the swagger
page maintained by TheTVDB that can be found on this address: https://api.thetvdb.com/swagger
The only APIs not represented by methods in this library are the ones that only returns parameters information of
the actual API.
"""
API_BASE_URL = 'https://api.thetvdb.com'
TOKEN_DURATION_SECONDS = 23 * 3600 # 23 Hours
TOKEN_MAX_DURATION = 24 * 3600 # 24 Hours
def __init__(self, username, api_key, account_identifier, language=None):
self.username = username
self.api_key = api_key
self.account_identifier = account_identifier
self.is_authenticated = False
self.__token = None
self.__auth_time = 0
self.language = language
def __get_header(self):
header = dict()
header['Content-Type'] = 'application/json'
header['User-Agent'] = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:47.0) Gecko/20100101 Firefox/47.0'
if self.language:
header['Accept-Language'] = self.language
return header
def __refresh_token(self):
headers = self.__get_header()
headers['Authorization'] = 'Bearer %s' % self.__token
resp = requests_util.run_request('get', self.API_BASE_URL + '/refresh_token', headers=headers)
if resp.status_code == 200:
token_resp = self.parse_raw_response(resp)
self.__token = token_resp['token']
self.__auth_time = datetime.now()
def __get_header_with_auth(self):
"""
This private method returns the HTTP heder filled with the Authorization information with the user token.
The token validity is monitored whenever this function is called, so according to the swagger page of TheTVDB
(https://api.thetvdb.com/swagger) the tokens are valid for 24 hours, therefore if a token is generated for over
23 hours already, this function will also perform a token refresh using TheTVDB refresh_token API. If over 24
hours have passed since the token generation, a login is performed to generate a new one, instead.
:return: A python dictionary representing the HTTP header to be used in TheTVDB API calls.
"""
auth_header = self.__get_header()
auth_header['Authorization'] = 'Bearer %s' % self.__token
token_renew_time = self.__auth_time + timedelta(seconds=self.TOKEN_DURATION_SECONDS)
if datetime.now() > token_renew_time:
token_max_time = self.__auth_time + timedelta(seconds=self.TOKEN_MAX_DURATION)
if datetime.now() < token_max_time:
self.__refresh_token()
else:
self.login()
auth_header['Authorization'] = 'Bearer %s' % self.__token
return auth_header
def login(self):
"""
This method performs the login on TheTVDB given the api key, user name and account identifier.
:return: None
"""
auth_data = dict()
auth_data['apikey'] = self.api_key
auth_data['username'] = self.username
auth_data['userkey'] = self.account_identifier
auth_resp = requests_util.run_request('post', self.API_BASE_URL + '/login', data=json.dumps(auth_data),
headers=self.__get_header())
if auth_resp.status_code == 200:
auth_resp_data = self.parse_raw_response(auth_resp)
self.__token = auth_resp_data['token']
self.__auth_time = datetime.now()
self.is_authenticated = True
else:
raise AuthenticationFailedException('Authentication failed!')
@authentication_required
def search_series(self, name=None, imdb_id=None, zap2it_id=None):
"""
Searchs for a series in TheTVDB by either its name, imdb_id or zap2it_id.
:param name: the name of the series to look for
:param imdb_id: the IMDB id of the series to look for
:param zap2it_id: the zap2it id of the series to look for.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
arguments = locals()
optional_parameters = {'name': 'name', 'imdb_id': 'imdbId', 'zap2it_id': 'zap2itId'}
query_string = utils.query_param_string_from_option_args(optional_parameters, arguments)
raw_response = requests_util.run_request('get', '%s%s?%s' % (self.API_BASE_URL, '/search/series',
query_string),
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series(self, series_id):
"""
Retrieves the information of a series from TheTVDB given the series ID.
:param series_id: the id of the series on TheTVDB.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d' % series_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series_actors(self, series_id):
"""
Retrieves the information on the actors of a particular series, given its TheTVDB id.
:param series_id: the TheTVDB id of the series
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/actors' % series_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series_episodes(self, series_id, page=1):
"""
Retrieves all episodes for a particular series given its TheTVDB id. It retrieves a maximum of 100 results per
page.
:param series_id: The TheTVDB id of the series.
:param page: The page number. If none is provided, 1 is used by default.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/episodes?page=%d' %
(series_id, page), headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series_episodes(self, series_id, episode_number=None, aired_season=None, aired_episode=None,
dvd_season=None, dvd_episode=None, imdb_id=None, page=1):
"""
Retrieves all episodes for a particular series given its TheTVDB and filtered by additional optional details.
:param series_id: The TheTVDB id of the series.
:param episode_number: The optional absolute episode number.
:param aired_season: The optional aired season number.
:param aired_episode: The optional aired episode number.
:param dvd_season: The optional DVD season number.
:param dvd_episode: The optional DVD episode number.
:param imdb_id: The optional IMDB Id of the series.
:param page: The page number. If none is provided, 1 is used by default.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
arguments = locals()
optional_parameters = {'episode_number': 'absoluteNumber', 'aired_season': 'airedSeason',
'aired_episode': 'airedEpisode', 'dvd_season': 'dvdSeason', 'dvd_episode': 'dvdEpisode',
'imdb_id': 'imdbId', 'page': 'page'}
query_string = utils.query_param_string_from_option_args(optional_parameters, arguments)
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/episodes/query?%s' %
(series_id, query_string), headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series_episodes_summary(self, series_id):
"""
Retrieves the summary of the episodes and seasons of a series given its TheTVDB id.
:param series_id: The TheTVDB id of the series.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/episodes/summary' % series_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def __get_series_images(self, series_id):
"""
Retrieves the url to images (posters, fanart) for the series, seasons and episodes of a series given its
TheTVDB id.
:param series_id: The TheTVDB id of the series.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/images' % series_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series_images(self, series_id, image_type=None, resolution=None, sub_key=None):
"""
Retrieves the url to images (posters, fanart) for the series, seasons and episodes of a series given its
TheTVDB id and filtered by additional parameters.
:param series_id: The TheTVDB id of the series.
:param image_type: The optional type of image: posters, fanart, thumbnail, etc.
:param resolution: The optional resolution: i.e. 1280x1024
:param sub_key: The optional subkey: graphical, text.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
arguments = locals()
optional_parameters = {'image_type': 'keyType', 'resolution': 'resolution', 'sub_key': 'subKey'}
query_string = utils.query_param_string_from_option_args(optional_parameters, arguments)
if len(query_string):
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/images/query?%s' %
(series_id, query_string), headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
else:
return self.__get_series_images(series_id)
@authentication_required
@authentication_required
def get_user(self):
"""
Retrieves information about the user currently using the api.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('get', self.API_BASE_URL + '/user',
headers=self.__get_header_with_auth()))
@authentication_required
def get_user_favorites(self):
"""
Retrieves the list of tv series the current user has flagged as favorite.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('get', self.API_BASE_URL + '/user/favorites',
headers=self.__get_header_with_auth()))
@authentication_required
def delete_user_favorite(self, series_id):
"""
Deletes the series of the provided id from the favorites list of the current user.
:param series_id: The TheTVDB id of the series.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('delete',
self.API_BASE_URL + '/user/favorites/%d' % series_id,
headers=self.__get_header_with_auth()))
@authentication_required
def add_user_favorite(self, series_id):
"""
Added the series related to the series id provided to the list of favorites of the current user.
:param series_id: The TheTVDB id of the series.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('put',
self.API_BASE_URL + '/user/favorites/%d' % series_id,
headers=self.__get_header_with_auth()))
@authentication_required
def __get_user_ratings(self):
"""
Returns a list of the ratings provided by the current user.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('get', self.API_BASE_URL + '/user/ratings',
headers=self.__get_header_with_auth()))
@authentication_required
def get_user_ratings(self, item_type=None):
"""
Returns a list of the ratings for the type of item provided, for the current user.
:param item_type: One of: series, episode or banner.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
if item_type:
query_string = 'itemType=%s' % item_type
return self.parse_raw_response(
requests_util.run_request('get', self.API_BASE_URL + '/user/ratings/qeury?%s' % query_string,
headers=self.__get_header_with_auth()))
else:
return self.__get_user_ratings()
@authentication_required
def add_user_rating(self, item_type, item_id, item_rating):
"""
Adds the rating for the item indicated for the current user.
:param item_type: One of: series, episode, banner.
:param item_id: The TheTVDB id of the item.
:param item_rating: The rating from 0 to 10.
:return:
"""
raw_response = requests_util.run_request('put',
self.API_BASE_URL + '/user/ratings/%s/%d/%d' %
(item_type, item_id, item_rating),
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def delete_user_rating(self, item_type, item_id):
"""
Deletes from the list of rating of the current user, the rating provided for the specified element type.
:param item_type: One of: series, episode, banner.
:param item_id: The TheTVDB Id of the item.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('delete',
self.API_BASE_URL + '/user/ratings/%s/%d' %
(item_type, item_id), headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_episode(self, episode_id):
"""
Returns the full information of the episode belonging to the Id provided.
:param episode_id: The TheTVDB id of the episode.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/episodes/%d' % episode_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_languages(self):
"""
Returns a list of all language options available in TheTVDB.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/languages',
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_language(self, language_id):
"""
Retrieves information about the language of the given id.
:param language_id: The TheTVDB Id of the language.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/languages/%d' % language_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
|
thilux/tvdb_client | tvdb_client/clients/ApiV2Client.py | ApiV2Client.get_user | python | def get_user(self):
return self.parse_raw_response(requests_util.run_request('get', self.API_BASE_URL + '/user',
headers=self.__get_header_with_auth())) | Retrieves information about the user currently using the api.
:return: a python dictionary with either the result of the search or an error from TheTVDB. | train | https://github.com/thilux/tvdb_client/blob/2d5106f260367c0abe1284683697874df6343f78/tvdb_client/clients/ApiV2Client.py#L274-L282 | [
"def run_request(request_type, url, retries=5, data=None, headers=None):\n func = __request_factory(request_type)\n\n for attempt in range(retries+1):\n try:\n response = func(url, data=data, headers=headers)\n return response\n except RequestException:\n warnings.warn('Got error on request for attemp %d - %s' %\n (attempt, 'retry is possible' if attempt < retries else 'no retry'))\n return None\n",
"def __get_header_with_auth(self):\n \"\"\"\n This private method returns the HTTP heder filled with the Authorization information with the user token.\n The token validity is monitored whenever this function is called, so according to the swagger page of TheTVDB\n (https://api.thetvdb.com/swagger) the tokens are valid for 24 hours, therefore if a token is generated for over\n 23 hours already, this function will also perform a token refresh using TheTVDB refresh_token API. If over 24\n hours have passed since the token generation, a login is performed to generate a new one, instead.\n\n :return: A python dictionary representing the HTTP header to be used in TheTVDB API calls.\n \"\"\"\n auth_header = self.__get_header()\n auth_header['Authorization'] = 'Bearer %s' % self.__token\n token_renew_time = self.__auth_time + timedelta(seconds=self.TOKEN_DURATION_SECONDS)\n\n if datetime.now() > token_renew_time:\n token_max_time = self.__auth_time + timedelta(seconds=self.TOKEN_MAX_DURATION)\n if datetime.now() < token_max_time:\n self.__refresh_token()\n else:\n self.login()\n\n auth_header['Authorization'] = 'Bearer %s' % self.__token\n\n return auth_header\n",
"def parse_raw_response(self, raw_response):\n\n if raw_response.status_code == 200:\n return json.loads(make_str_content(raw_response.content))\n else:\n return self.__handle_error(raw_response)\n"
] | class ApiV2Client(BaseClient):
"""
This is the python library implementation of the TheTVDB API V2. Details of the APIs is documented in the swagger
page maintained by TheTVDB that can be found on this address: https://api.thetvdb.com/swagger
The only APIs not represented by methods in this library are the ones that only returns parameters information of
the actual API.
"""
API_BASE_URL = 'https://api.thetvdb.com'
TOKEN_DURATION_SECONDS = 23 * 3600 # 23 Hours
TOKEN_MAX_DURATION = 24 * 3600 # 24 Hours
def __init__(self, username, api_key, account_identifier, language=None):
self.username = username
self.api_key = api_key
self.account_identifier = account_identifier
self.is_authenticated = False
self.__token = None
self.__auth_time = 0
self.language = language
def __get_header(self):
header = dict()
header['Content-Type'] = 'application/json'
header['User-Agent'] = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:47.0) Gecko/20100101 Firefox/47.0'
if self.language:
header['Accept-Language'] = self.language
return header
def __refresh_token(self):
headers = self.__get_header()
headers['Authorization'] = 'Bearer %s' % self.__token
resp = requests_util.run_request('get', self.API_BASE_URL + '/refresh_token', headers=headers)
if resp.status_code == 200:
token_resp = self.parse_raw_response(resp)
self.__token = token_resp['token']
self.__auth_time = datetime.now()
def __get_header_with_auth(self):
"""
This private method returns the HTTP heder filled with the Authorization information with the user token.
The token validity is monitored whenever this function is called, so according to the swagger page of TheTVDB
(https://api.thetvdb.com/swagger) the tokens are valid for 24 hours, therefore if a token is generated for over
23 hours already, this function will also perform a token refresh using TheTVDB refresh_token API. If over 24
hours have passed since the token generation, a login is performed to generate a new one, instead.
:return: A python dictionary representing the HTTP header to be used in TheTVDB API calls.
"""
auth_header = self.__get_header()
auth_header['Authorization'] = 'Bearer %s' % self.__token
token_renew_time = self.__auth_time + timedelta(seconds=self.TOKEN_DURATION_SECONDS)
if datetime.now() > token_renew_time:
token_max_time = self.__auth_time + timedelta(seconds=self.TOKEN_MAX_DURATION)
if datetime.now() < token_max_time:
self.__refresh_token()
else:
self.login()
auth_header['Authorization'] = 'Bearer %s' % self.__token
return auth_header
def login(self):
"""
This method performs the login on TheTVDB given the api key, user name and account identifier.
:return: None
"""
auth_data = dict()
auth_data['apikey'] = self.api_key
auth_data['username'] = self.username
auth_data['userkey'] = self.account_identifier
auth_resp = requests_util.run_request('post', self.API_BASE_URL + '/login', data=json.dumps(auth_data),
headers=self.__get_header())
if auth_resp.status_code == 200:
auth_resp_data = self.parse_raw_response(auth_resp)
self.__token = auth_resp_data['token']
self.__auth_time = datetime.now()
self.is_authenticated = True
else:
raise AuthenticationFailedException('Authentication failed!')
@authentication_required
def search_series(self, name=None, imdb_id=None, zap2it_id=None):
"""
Searchs for a series in TheTVDB by either its name, imdb_id or zap2it_id.
:param name: the name of the series to look for
:param imdb_id: the IMDB id of the series to look for
:param zap2it_id: the zap2it id of the series to look for.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
arguments = locals()
optional_parameters = {'name': 'name', 'imdb_id': 'imdbId', 'zap2it_id': 'zap2itId'}
query_string = utils.query_param_string_from_option_args(optional_parameters, arguments)
raw_response = requests_util.run_request('get', '%s%s?%s' % (self.API_BASE_URL, '/search/series',
query_string),
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series(self, series_id):
"""
Retrieves the information of a series from TheTVDB given the series ID.
:param series_id: the id of the series on TheTVDB.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d' % series_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series_actors(self, series_id):
"""
Retrieves the information on the actors of a particular series, given its TheTVDB id.
:param series_id: the TheTVDB id of the series
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/actors' % series_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series_episodes(self, series_id, page=1):
"""
Retrieves all episodes for a particular series given its TheTVDB id. It retrieves a maximum of 100 results per
page.
:param series_id: The TheTVDB id of the series.
:param page: The page number. If none is provided, 1 is used by default.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/episodes?page=%d' %
(series_id, page), headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series_episodes(self, series_id, episode_number=None, aired_season=None, aired_episode=None,
dvd_season=None, dvd_episode=None, imdb_id=None, page=1):
"""
Retrieves all episodes for a particular series given its TheTVDB and filtered by additional optional details.
:param series_id: The TheTVDB id of the series.
:param episode_number: The optional absolute episode number.
:param aired_season: The optional aired season number.
:param aired_episode: The optional aired episode number.
:param dvd_season: The optional DVD season number.
:param dvd_episode: The optional DVD episode number.
:param imdb_id: The optional IMDB Id of the series.
:param page: The page number. If none is provided, 1 is used by default.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
arguments = locals()
optional_parameters = {'episode_number': 'absoluteNumber', 'aired_season': 'airedSeason',
'aired_episode': 'airedEpisode', 'dvd_season': 'dvdSeason', 'dvd_episode': 'dvdEpisode',
'imdb_id': 'imdbId', 'page': 'page'}
query_string = utils.query_param_string_from_option_args(optional_parameters, arguments)
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/episodes/query?%s' %
(series_id, query_string), headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series_episodes_summary(self, series_id):
"""
Retrieves the summary of the episodes and seasons of a series given its TheTVDB id.
:param series_id: The TheTVDB id of the series.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/episodes/summary' % series_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def __get_series_images(self, series_id):
"""
Retrieves the url to images (posters, fanart) for the series, seasons and episodes of a series given its
TheTVDB id.
:param series_id: The TheTVDB id of the series.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/images' % series_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series_images(self, series_id, image_type=None, resolution=None, sub_key=None):
"""
Retrieves the url to images (posters, fanart) for the series, seasons and episodes of a series given its
TheTVDB id and filtered by additional parameters.
:param series_id: The TheTVDB id of the series.
:param image_type: The optional type of image: posters, fanart, thumbnail, etc.
:param resolution: The optional resolution: i.e. 1280x1024
:param sub_key: The optional subkey: graphical, text.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
arguments = locals()
optional_parameters = {'image_type': 'keyType', 'resolution': 'resolution', 'sub_key': 'subKey'}
query_string = utils.query_param_string_from_option_args(optional_parameters, arguments)
if len(query_string):
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/images/query?%s' %
(series_id, query_string), headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
else:
return self.__get_series_images(series_id)
@authentication_required
def get_updated(self, from_time, to_time=None):
"""
Retrives a list of series that have changed on TheTVDB since a provided from time parameter and optionally to an
specified to time.
:param from_time: An epoch representation of the date from which to restrict the query to.
:param to_time: An optional epcoh representation of the date to which to restrict the query to.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
arguments = locals()
optional_parameters = {'to_time': 'toTime'}
query_string = 'fromTime=%s&%s' % (from_time,
utils.query_param_string_from_option_args(optional_parameters, arguments))
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/uodated/query?%s' % query_string,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
@authentication_required
def get_user_favorites(self):
"""
Retrieves the list of tv series the current user has flagged as favorite.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('get', self.API_BASE_URL + '/user/favorites',
headers=self.__get_header_with_auth()))
@authentication_required
def delete_user_favorite(self, series_id):
"""
Deletes the series of the provided id from the favorites list of the current user.
:param series_id: The TheTVDB id of the series.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('delete',
self.API_BASE_URL + '/user/favorites/%d' % series_id,
headers=self.__get_header_with_auth()))
@authentication_required
def add_user_favorite(self, series_id):
"""
Added the series related to the series id provided to the list of favorites of the current user.
:param series_id: The TheTVDB id of the series.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('put',
self.API_BASE_URL + '/user/favorites/%d' % series_id,
headers=self.__get_header_with_auth()))
@authentication_required
def __get_user_ratings(self):
"""
Returns a list of the ratings provided by the current user.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('get', self.API_BASE_URL + '/user/ratings',
headers=self.__get_header_with_auth()))
@authentication_required
def get_user_ratings(self, item_type=None):
"""
Returns a list of the ratings for the type of item provided, for the current user.
:param item_type: One of: series, episode or banner.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
if item_type:
query_string = 'itemType=%s' % item_type
return self.parse_raw_response(
requests_util.run_request('get', self.API_BASE_URL + '/user/ratings/qeury?%s' % query_string,
headers=self.__get_header_with_auth()))
else:
return self.__get_user_ratings()
@authentication_required
def add_user_rating(self, item_type, item_id, item_rating):
"""
Adds the rating for the item indicated for the current user.
:param item_type: One of: series, episode, banner.
:param item_id: The TheTVDB id of the item.
:param item_rating: The rating from 0 to 10.
:return:
"""
raw_response = requests_util.run_request('put',
self.API_BASE_URL + '/user/ratings/%s/%d/%d' %
(item_type, item_id, item_rating),
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def delete_user_rating(self, item_type, item_id):
"""
Deletes from the list of rating of the current user, the rating provided for the specified element type.
:param item_type: One of: series, episode, banner.
:param item_id: The TheTVDB Id of the item.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('delete',
self.API_BASE_URL + '/user/ratings/%s/%d' %
(item_type, item_id), headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_episode(self, episode_id):
"""
Returns the full information of the episode belonging to the Id provided.
:param episode_id: The TheTVDB id of the episode.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/episodes/%d' % episode_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_languages(self):
"""
Returns a list of all language options available in TheTVDB.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/languages',
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_language(self, language_id):
"""
Retrieves information about the language of the given id.
:param language_id: The TheTVDB Id of the language.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/languages/%d' % language_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
|
thilux/tvdb_client | tvdb_client/clients/ApiV2Client.py | ApiV2Client.get_user_favorites | python | def get_user_favorites(self):
return self.parse_raw_response(requests_util.run_request('get', self.API_BASE_URL + '/user/favorites',
headers=self.__get_header_with_auth())) | Retrieves the list of tv series the current user has flagged as favorite.
:return: a python dictionary with either the result of the search or an error from TheTVDB. | train | https://github.com/thilux/tvdb_client/blob/2d5106f260367c0abe1284683697874df6343f78/tvdb_client/clients/ApiV2Client.py#L285-L293 | [
"def run_request(request_type, url, retries=5, data=None, headers=None):\n func = __request_factory(request_type)\n\n for attempt in range(retries+1):\n try:\n response = func(url, data=data, headers=headers)\n return response\n except RequestException:\n warnings.warn('Got error on request for attemp %d - %s' %\n (attempt, 'retry is possible' if attempt < retries else 'no retry'))\n return None\n",
"def __get_header_with_auth(self):\n \"\"\"\n This private method returns the HTTP heder filled with the Authorization information with the user token.\n The token validity is monitored whenever this function is called, so according to the swagger page of TheTVDB\n (https://api.thetvdb.com/swagger) the tokens are valid for 24 hours, therefore if a token is generated for over\n 23 hours already, this function will also perform a token refresh using TheTVDB refresh_token API. If over 24\n hours have passed since the token generation, a login is performed to generate a new one, instead.\n\n :return: A python dictionary representing the HTTP header to be used in TheTVDB API calls.\n \"\"\"\n auth_header = self.__get_header()\n auth_header['Authorization'] = 'Bearer %s' % self.__token\n token_renew_time = self.__auth_time + timedelta(seconds=self.TOKEN_DURATION_SECONDS)\n\n if datetime.now() > token_renew_time:\n token_max_time = self.__auth_time + timedelta(seconds=self.TOKEN_MAX_DURATION)\n if datetime.now() < token_max_time:\n self.__refresh_token()\n else:\n self.login()\n\n auth_header['Authorization'] = 'Bearer %s' % self.__token\n\n return auth_header\n",
"def parse_raw_response(self, raw_response):\n\n if raw_response.status_code == 200:\n return json.loads(make_str_content(raw_response.content))\n else:\n return self.__handle_error(raw_response)\n"
] | class ApiV2Client(BaseClient):
"""
This is the python library implementation of the TheTVDB API V2. Details of the APIs is documented in the swagger
page maintained by TheTVDB that can be found on this address: https://api.thetvdb.com/swagger
The only APIs not represented by methods in this library are the ones that only returns parameters information of
the actual API.
"""
API_BASE_URL = 'https://api.thetvdb.com'
TOKEN_DURATION_SECONDS = 23 * 3600 # 23 Hours
TOKEN_MAX_DURATION = 24 * 3600 # 24 Hours
def __init__(self, username, api_key, account_identifier, language=None):
self.username = username
self.api_key = api_key
self.account_identifier = account_identifier
self.is_authenticated = False
self.__token = None
self.__auth_time = 0
self.language = language
def __get_header(self):
header = dict()
header['Content-Type'] = 'application/json'
header['User-Agent'] = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:47.0) Gecko/20100101 Firefox/47.0'
if self.language:
header['Accept-Language'] = self.language
return header
def __refresh_token(self):
headers = self.__get_header()
headers['Authorization'] = 'Bearer %s' % self.__token
resp = requests_util.run_request('get', self.API_BASE_URL + '/refresh_token', headers=headers)
if resp.status_code == 200:
token_resp = self.parse_raw_response(resp)
self.__token = token_resp['token']
self.__auth_time = datetime.now()
def __get_header_with_auth(self):
"""
This private method returns the HTTP heder filled with the Authorization information with the user token.
The token validity is monitored whenever this function is called, so according to the swagger page of TheTVDB
(https://api.thetvdb.com/swagger) the tokens are valid for 24 hours, therefore if a token is generated for over
23 hours already, this function will also perform a token refresh using TheTVDB refresh_token API. If over 24
hours have passed since the token generation, a login is performed to generate a new one, instead.
:return: A python dictionary representing the HTTP header to be used in TheTVDB API calls.
"""
auth_header = self.__get_header()
auth_header['Authorization'] = 'Bearer %s' % self.__token
token_renew_time = self.__auth_time + timedelta(seconds=self.TOKEN_DURATION_SECONDS)
if datetime.now() > token_renew_time:
token_max_time = self.__auth_time + timedelta(seconds=self.TOKEN_MAX_DURATION)
if datetime.now() < token_max_time:
self.__refresh_token()
else:
self.login()
auth_header['Authorization'] = 'Bearer %s' % self.__token
return auth_header
def login(self):
"""
This method performs the login on TheTVDB given the api key, user name and account identifier.
:return: None
"""
auth_data = dict()
auth_data['apikey'] = self.api_key
auth_data['username'] = self.username
auth_data['userkey'] = self.account_identifier
auth_resp = requests_util.run_request('post', self.API_BASE_URL + '/login', data=json.dumps(auth_data),
headers=self.__get_header())
if auth_resp.status_code == 200:
auth_resp_data = self.parse_raw_response(auth_resp)
self.__token = auth_resp_data['token']
self.__auth_time = datetime.now()
self.is_authenticated = True
else:
raise AuthenticationFailedException('Authentication failed!')
@authentication_required
def search_series(self, name=None, imdb_id=None, zap2it_id=None):
"""
Searchs for a series in TheTVDB by either its name, imdb_id or zap2it_id.
:param name: the name of the series to look for
:param imdb_id: the IMDB id of the series to look for
:param zap2it_id: the zap2it id of the series to look for.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
arguments = locals()
optional_parameters = {'name': 'name', 'imdb_id': 'imdbId', 'zap2it_id': 'zap2itId'}
query_string = utils.query_param_string_from_option_args(optional_parameters, arguments)
raw_response = requests_util.run_request('get', '%s%s?%s' % (self.API_BASE_URL, '/search/series',
query_string),
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series(self, series_id):
"""
Retrieves the information of a series from TheTVDB given the series ID.
:param series_id: the id of the series on TheTVDB.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d' % series_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series_actors(self, series_id):
"""
Retrieves the information on the actors of a particular series, given its TheTVDB id.
:param series_id: the TheTVDB id of the series
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/actors' % series_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series_episodes(self, series_id, page=1):
"""
Retrieves all episodes for a particular series given its TheTVDB id. It retrieves a maximum of 100 results per
page.
:param series_id: The TheTVDB id of the series.
:param page: The page number. If none is provided, 1 is used by default.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/episodes?page=%d' %
(series_id, page), headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series_episodes(self, series_id, episode_number=None, aired_season=None, aired_episode=None,
dvd_season=None, dvd_episode=None, imdb_id=None, page=1):
"""
Retrieves all episodes for a particular series given its TheTVDB and filtered by additional optional details.
:param series_id: The TheTVDB id of the series.
:param episode_number: The optional absolute episode number.
:param aired_season: The optional aired season number.
:param aired_episode: The optional aired episode number.
:param dvd_season: The optional DVD season number.
:param dvd_episode: The optional DVD episode number.
:param imdb_id: The optional IMDB Id of the series.
:param page: The page number. If none is provided, 1 is used by default.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
arguments = locals()
optional_parameters = {'episode_number': 'absoluteNumber', 'aired_season': 'airedSeason',
'aired_episode': 'airedEpisode', 'dvd_season': 'dvdSeason', 'dvd_episode': 'dvdEpisode',
'imdb_id': 'imdbId', 'page': 'page'}
query_string = utils.query_param_string_from_option_args(optional_parameters, arguments)
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/episodes/query?%s' %
(series_id, query_string), headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series_episodes_summary(self, series_id):
"""
Retrieves the summary of the episodes and seasons of a series given its TheTVDB id.
:param series_id: The TheTVDB id of the series.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/episodes/summary' % series_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def __get_series_images(self, series_id):
"""
Retrieves the url to images (posters, fanart) for the series, seasons and episodes of a series given its
TheTVDB id.
:param series_id: The TheTVDB id of the series.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/images' % series_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series_images(self, series_id, image_type=None, resolution=None, sub_key=None):
"""
Retrieves the url to images (posters, fanart) for the series, seasons and episodes of a series given its
TheTVDB id and filtered by additional parameters.
:param series_id: The TheTVDB id of the series.
:param image_type: The optional type of image: posters, fanart, thumbnail, etc.
:param resolution: The optional resolution: i.e. 1280x1024
:param sub_key: The optional subkey: graphical, text.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
arguments = locals()
optional_parameters = {'image_type': 'keyType', 'resolution': 'resolution', 'sub_key': 'subKey'}
query_string = utils.query_param_string_from_option_args(optional_parameters, arguments)
if len(query_string):
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/images/query?%s' %
(series_id, query_string), headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
else:
return self.__get_series_images(series_id)
@authentication_required
def get_updated(self, from_time, to_time=None):
"""
Retrives a list of series that have changed on TheTVDB since a provided from time parameter and optionally to an
specified to time.
:param from_time: An epoch representation of the date from which to restrict the query to.
:param to_time: An optional epcoh representation of the date to which to restrict the query to.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
arguments = locals()
optional_parameters = {'to_time': 'toTime'}
query_string = 'fromTime=%s&%s' % (from_time,
utils.query_param_string_from_option_args(optional_parameters, arguments))
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/uodated/query?%s' % query_string,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_user(self):
"""
Retrieves information about the user currently using the api.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('get', self.API_BASE_URL + '/user',
headers=self.__get_header_with_auth()))
@authentication_required
@authentication_required
def delete_user_favorite(self, series_id):
"""
Deletes the series of the provided id from the favorites list of the current user.
:param series_id: The TheTVDB id of the series.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('delete',
self.API_BASE_URL + '/user/favorites/%d' % series_id,
headers=self.__get_header_with_auth()))
@authentication_required
def add_user_favorite(self, series_id):
"""
Added the series related to the series id provided to the list of favorites of the current user.
:param series_id: The TheTVDB id of the series.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('put',
self.API_BASE_URL + '/user/favorites/%d' % series_id,
headers=self.__get_header_with_auth()))
@authentication_required
def __get_user_ratings(self):
"""
Returns a list of the ratings provided by the current user.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('get', self.API_BASE_URL + '/user/ratings',
headers=self.__get_header_with_auth()))
@authentication_required
def get_user_ratings(self, item_type=None):
"""
Returns a list of the ratings for the type of item provided, for the current user.
:param item_type: One of: series, episode or banner.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
if item_type:
query_string = 'itemType=%s' % item_type
return self.parse_raw_response(
requests_util.run_request('get', self.API_BASE_URL + '/user/ratings/qeury?%s' % query_string,
headers=self.__get_header_with_auth()))
else:
return self.__get_user_ratings()
@authentication_required
def add_user_rating(self, item_type, item_id, item_rating):
"""
Adds the rating for the item indicated for the current user.
:param item_type: One of: series, episode, banner.
:param item_id: The TheTVDB id of the item.
:param item_rating: The rating from 0 to 10.
:return:
"""
raw_response = requests_util.run_request('put',
self.API_BASE_URL + '/user/ratings/%s/%d/%d' %
(item_type, item_id, item_rating),
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def delete_user_rating(self, item_type, item_id):
"""
Deletes from the list of rating of the current user, the rating provided for the specified element type.
:param item_type: One of: series, episode, banner.
:param item_id: The TheTVDB Id of the item.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('delete',
self.API_BASE_URL + '/user/ratings/%s/%d' %
(item_type, item_id), headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_episode(self, episode_id):
"""
Returns the full information of the episode belonging to the Id provided.
:param episode_id: The TheTVDB id of the episode.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/episodes/%d' % episode_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_languages(self):
"""
Returns a list of all language options available in TheTVDB.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/languages',
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_language(self, language_id):
"""
Retrieves information about the language of the given id.
:param language_id: The TheTVDB Id of the language.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/languages/%d' % language_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
|
thilux/tvdb_client | tvdb_client/clients/ApiV2Client.py | ApiV2Client.delete_user_favorite | python | def delete_user_favorite(self, series_id):
return self.parse_raw_response(requests_util.run_request('delete',
self.API_BASE_URL + '/user/favorites/%d' % series_id,
headers=self.__get_header_with_auth())) | Deletes the series of the provided id from the favorites list of the current user.
:param series_id: The TheTVDB id of the series.
:return: a python dictionary with either the result of the search or an error from TheTVDB. | train | https://github.com/thilux/tvdb_client/blob/2d5106f260367c0abe1284683697874df6343f78/tvdb_client/clients/ApiV2Client.py#L296-L306 | [
"def run_request(request_type, url, retries=5, data=None, headers=None):\n func = __request_factory(request_type)\n\n for attempt in range(retries+1):\n try:\n response = func(url, data=data, headers=headers)\n return response\n except RequestException:\n warnings.warn('Got error on request for attemp %d - %s' %\n (attempt, 'retry is possible' if attempt < retries else 'no retry'))\n return None\n",
"def __get_header_with_auth(self):\n \"\"\"\n This private method returns the HTTP heder filled with the Authorization information with the user token.\n The token validity is monitored whenever this function is called, so according to the swagger page of TheTVDB\n (https://api.thetvdb.com/swagger) the tokens are valid for 24 hours, therefore if a token is generated for over\n 23 hours already, this function will also perform a token refresh using TheTVDB refresh_token API. If over 24\n hours have passed since the token generation, a login is performed to generate a new one, instead.\n\n :return: A python dictionary representing the HTTP header to be used in TheTVDB API calls.\n \"\"\"\n auth_header = self.__get_header()\n auth_header['Authorization'] = 'Bearer %s' % self.__token\n token_renew_time = self.__auth_time + timedelta(seconds=self.TOKEN_DURATION_SECONDS)\n\n if datetime.now() > token_renew_time:\n token_max_time = self.__auth_time + timedelta(seconds=self.TOKEN_MAX_DURATION)\n if datetime.now() < token_max_time:\n self.__refresh_token()\n else:\n self.login()\n\n auth_header['Authorization'] = 'Bearer %s' % self.__token\n\n return auth_header\n",
"def parse_raw_response(self, raw_response):\n\n if raw_response.status_code == 200:\n return json.loads(make_str_content(raw_response.content))\n else:\n return self.__handle_error(raw_response)\n"
] | class ApiV2Client(BaseClient):
"""
This is the python library implementation of the TheTVDB API V2. Details of the APIs is documented in the swagger
page maintained by TheTVDB that can be found on this address: https://api.thetvdb.com/swagger
The only APIs not represented by methods in this library are the ones that only returns parameters information of
the actual API.
"""
API_BASE_URL = 'https://api.thetvdb.com'
TOKEN_DURATION_SECONDS = 23 * 3600 # 23 Hours
TOKEN_MAX_DURATION = 24 * 3600 # 24 Hours
def __init__(self, username, api_key, account_identifier, language=None):
self.username = username
self.api_key = api_key
self.account_identifier = account_identifier
self.is_authenticated = False
self.__token = None
self.__auth_time = 0
self.language = language
def __get_header(self):
header = dict()
header['Content-Type'] = 'application/json'
header['User-Agent'] = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:47.0) Gecko/20100101 Firefox/47.0'
if self.language:
header['Accept-Language'] = self.language
return header
def __refresh_token(self):
headers = self.__get_header()
headers['Authorization'] = 'Bearer %s' % self.__token
resp = requests_util.run_request('get', self.API_BASE_URL + '/refresh_token', headers=headers)
if resp.status_code == 200:
token_resp = self.parse_raw_response(resp)
self.__token = token_resp['token']
self.__auth_time = datetime.now()
def __get_header_with_auth(self):
"""
This private method returns the HTTP heder filled with the Authorization information with the user token.
The token validity is monitored whenever this function is called, so according to the swagger page of TheTVDB
(https://api.thetvdb.com/swagger) the tokens are valid for 24 hours, therefore if a token is generated for over
23 hours already, this function will also perform a token refresh using TheTVDB refresh_token API. If over 24
hours have passed since the token generation, a login is performed to generate a new one, instead.
:return: A python dictionary representing the HTTP header to be used in TheTVDB API calls.
"""
auth_header = self.__get_header()
auth_header['Authorization'] = 'Bearer %s' % self.__token
token_renew_time = self.__auth_time + timedelta(seconds=self.TOKEN_DURATION_SECONDS)
if datetime.now() > token_renew_time:
token_max_time = self.__auth_time + timedelta(seconds=self.TOKEN_MAX_DURATION)
if datetime.now() < token_max_time:
self.__refresh_token()
else:
self.login()
auth_header['Authorization'] = 'Bearer %s' % self.__token
return auth_header
def login(self):
"""
This method performs the login on TheTVDB given the api key, user name and account identifier.
:return: None
"""
auth_data = dict()
auth_data['apikey'] = self.api_key
auth_data['username'] = self.username
auth_data['userkey'] = self.account_identifier
auth_resp = requests_util.run_request('post', self.API_BASE_URL + '/login', data=json.dumps(auth_data),
headers=self.__get_header())
if auth_resp.status_code == 200:
auth_resp_data = self.parse_raw_response(auth_resp)
self.__token = auth_resp_data['token']
self.__auth_time = datetime.now()
self.is_authenticated = True
else:
raise AuthenticationFailedException('Authentication failed!')
@authentication_required
def search_series(self, name=None, imdb_id=None, zap2it_id=None):
"""
Searchs for a series in TheTVDB by either its name, imdb_id or zap2it_id.
:param name: the name of the series to look for
:param imdb_id: the IMDB id of the series to look for
:param zap2it_id: the zap2it id of the series to look for.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
arguments = locals()
optional_parameters = {'name': 'name', 'imdb_id': 'imdbId', 'zap2it_id': 'zap2itId'}
query_string = utils.query_param_string_from_option_args(optional_parameters, arguments)
raw_response = requests_util.run_request('get', '%s%s?%s' % (self.API_BASE_URL, '/search/series',
query_string),
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series(self, series_id):
"""
Retrieves the information of a series from TheTVDB given the series ID.
:param series_id: the id of the series on TheTVDB.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d' % series_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series_actors(self, series_id):
"""
Retrieves the information on the actors of a particular series, given its TheTVDB id.
:param series_id: the TheTVDB id of the series
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/actors' % series_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series_episodes(self, series_id, page=1):
"""
Retrieves all episodes for a particular series given its TheTVDB id. It retrieves a maximum of 100 results per
page.
:param series_id: The TheTVDB id of the series.
:param page: The page number. If none is provided, 1 is used by default.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/episodes?page=%d' %
(series_id, page), headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series_episodes(self, series_id, episode_number=None, aired_season=None, aired_episode=None,
dvd_season=None, dvd_episode=None, imdb_id=None, page=1):
"""
Retrieves all episodes for a particular series given its TheTVDB and filtered by additional optional details.
:param series_id: The TheTVDB id of the series.
:param episode_number: The optional absolute episode number.
:param aired_season: The optional aired season number.
:param aired_episode: The optional aired episode number.
:param dvd_season: The optional DVD season number.
:param dvd_episode: The optional DVD episode number.
:param imdb_id: The optional IMDB Id of the series.
:param page: The page number. If none is provided, 1 is used by default.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
arguments = locals()
optional_parameters = {'episode_number': 'absoluteNumber', 'aired_season': 'airedSeason',
'aired_episode': 'airedEpisode', 'dvd_season': 'dvdSeason', 'dvd_episode': 'dvdEpisode',
'imdb_id': 'imdbId', 'page': 'page'}
query_string = utils.query_param_string_from_option_args(optional_parameters, arguments)
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/episodes/query?%s' %
(series_id, query_string), headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series_episodes_summary(self, series_id):
"""
Retrieves the summary of the episodes and seasons of a series given its TheTVDB id.
:param series_id: The TheTVDB id of the series.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/episodes/summary' % series_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def __get_series_images(self, series_id):
"""
Retrieves the url to images (posters, fanart) for the series, seasons and episodes of a series given its
TheTVDB id.
:param series_id: The TheTVDB id of the series.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/images' % series_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series_images(self, series_id, image_type=None, resolution=None, sub_key=None):
"""
Retrieves the url to images (posters, fanart) for the series, seasons and episodes of a series given its
TheTVDB id and filtered by additional parameters.
:param series_id: The TheTVDB id of the series.
:param image_type: The optional type of image: posters, fanart, thumbnail, etc.
:param resolution: The optional resolution: i.e. 1280x1024
:param sub_key: The optional subkey: graphical, text.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
arguments = locals()
optional_parameters = {'image_type': 'keyType', 'resolution': 'resolution', 'sub_key': 'subKey'}
query_string = utils.query_param_string_from_option_args(optional_parameters, arguments)
if len(query_string):
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/images/query?%s' %
(series_id, query_string), headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
else:
return self.__get_series_images(series_id)
@authentication_required
def get_updated(self, from_time, to_time=None):
"""
Retrives a list of series that have changed on TheTVDB since a provided from time parameter and optionally to an
specified to time.
:param from_time: An epoch representation of the date from which to restrict the query to.
:param to_time: An optional epcoh representation of the date to which to restrict the query to.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
arguments = locals()
optional_parameters = {'to_time': 'toTime'}
query_string = 'fromTime=%s&%s' % (from_time,
utils.query_param_string_from_option_args(optional_parameters, arguments))
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/uodated/query?%s' % query_string,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_user(self):
"""
Retrieves information about the user currently using the api.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('get', self.API_BASE_URL + '/user',
headers=self.__get_header_with_auth()))
@authentication_required
def get_user_favorites(self):
"""
Retrieves the list of tv series the current user has flagged as favorite.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('get', self.API_BASE_URL + '/user/favorites',
headers=self.__get_header_with_auth()))
@authentication_required
@authentication_required
def add_user_favorite(self, series_id):
"""
Added the series related to the series id provided to the list of favorites of the current user.
:param series_id: The TheTVDB id of the series.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('put',
self.API_BASE_URL + '/user/favorites/%d' % series_id,
headers=self.__get_header_with_auth()))
@authentication_required
def __get_user_ratings(self):
"""
Returns a list of the ratings provided by the current user.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('get', self.API_BASE_URL + '/user/ratings',
headers=self.__get_header_with_auth()))
@authentication_required
def get_user_ratings(self, item_type=None):
"""
Returns a list of the ratings for the type of item provided, for the current user.
:param item_type: One of: series, episode or banner.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
if item_type:
query_string = 'itemType=%s' % item_type
return self.parse_raw_response(
requests_util.run_request('get', self.API_BASE_URL + '/user/ratings/qeury?%s' % query_string,
headers=self.__get_header_with_auth()))
else:
return self.__get_user_ratings()
@authentication_required
def add_user_rating(self, item_type, item_id, item_rating):
"""
Adds the rating for the item indicated for the current user.
:param item_type: One of: series, episode, banner.
:param item_id: The TheTVDB id of the item.
:param item_rating: The rating from 0 to 10.
:return:
"""
raw_response = requests_util.run_request('put',
self.API_BASE_URL + '/user/ratings/%s/%d/%d' %
(item_type, item_id, item_rating),
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def delete_user_rating(self, item_type, item_id):
"""
Deletes from the list of rating of the current user, the rating provided for the specified element type.
:param item_type: One of: series, episode, banner.
:param item_id: The TheTVDB Id of the item.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('delete',
self.API_BASE_URL + '/user/ratings/%s/%d' %
(item_type, item_id), headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_episode(self, episode_id):
"""
Returns the full information of the episode belonging to the Id provided.
:param episode_id: The TheTVDB id of the episode.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/episodes/%d' % episode_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_languages(self):
"""
Returns a list of all language options available in TheTVDB.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/languages',
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_language(self, language_id):
"""
Retrieves information about the language of the given id.
:param language_id: The TheTVDB Id of the language.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/languages/%d' % language_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
|
thilux/tvdb_client | tvdb_client/clients/ApiV2Client.py | ApiV2Client.__get_user_ratings | python | def __get_user_ratings(self):
return self.parse_raw_response(requests_util.run_request('get', self.API_BASE_URL + '/user/ratings',
headers=self.__get_header_with_auth())) | Returns a list of the ratings provided by the current user.
:return: a python dictionary with either the result of the search or an error from TheTVDB. | train | https://github.com/thilux/tvdb_client/blob/2d5106f260367c0abe1284683697874df6343f78/tvdb_client/clients/ApiV2Client.py#L322-L330 | null | class ApiV2Client(BaseClient):
"""
This is the python library implementation of the TheTVDB API V2. Details of the APIs is documented in the swagger
page maintained by TheTVDB that can be found on this address: https://api.thetvdb.com/swagger
The only APIs not represented by methods in this library are the ones that only returns parameters information of
the actual API.
"""
API_BASE_URL = 'https://api.thetvdb.com'
TOKEN_DURATION_SECONDS = 23 * 3600 # 23 Hours
TOKEN_MAX_DURATION = 24 * 3600 # 24 Hours
def __init__(self, username, api_key, account_identifier, language=None):
self.username = username
self.api_key = api_key
self.account_identifier = account_identifier
self.is_authenticated = False
self.__token = None
self.__auth_time = 0
self.language = language
def __get_header(self):
header = dict()
header['Content-Type'] = 'application/json'
header['User-Agent'] = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:47.0) Gecko/20100101 Firefox/47.0'
if self.language:
header['Accept-Language'] = self.language
return header
def __refresh_token(self):
headers = self.__get_header()
headers['Authorization'] = 'Bearer %s' % self.__token
resp = requests_util.run_request('get', self.API_BASE_URL + '/refresh_token', headers=headers)
if resp.status_code == 200:
token_resp = self.parse_raw_response(resp)
self.__token = token_resp['token']
self.__auth_time = datetime.now()
def __get_header_with_auth(self):
"""
This private method returns the HTTP heder filled with the Authorization information with the user token.
The token validity is monitored whenever this function is called, so according to the swagger page of TheTVDB
(https://api.thetvdb.com/swagger) the tokens are valid for 24 hours, therefore if a token is generated for over
23 hours already, this function will also perform a token refresh using TheTVDB refresh_token API. If over 24
hours have passed since the token generation, a login is performed to generate a new one, instead.
:return: A python dictionary representing the HTTP header to be used in TheTVDB API calls.
"""
auth_header = self.__get_header()
auth_header['Authorization'] = 'Bearer %s' % self.__token
token_renew_time = self.__auth_time + timedelta(seconds=self.TOKEN_DURATION_SECONDS)
if datetime.now() > token_renew_time:
token_max_time = self.__auth_time + timedelta(seconds=self.TOKEN_MAX_DURATION)
if datetime.now() < token_max_time:
self.__refresh_token()
else:
self.login()
auth_header['Authorization'] = 'Bearer %s' % self.__token
return auth_header
def login(self):
"""
This method performs the login on TheTVDB given the api key, user name and account identifier.
:return: None
"""
auth_data = dict()
auth_data['apikey'] = self.api_key
auth_data['username'] = self.username
auth_data['userkey'] = self.account_identifier
auth_resp = requests_util.run_request('post', self.API_BASE_URL + '/login', data=json.dumps(auth_data),
headers=self.__get_header())
if auth_resp.status_code == 200:
auth_resp_data = self.parse_raw_response(auth_resp)
self.__token = auth_resp_data['token']
self.__auth_time = datetime.now()
self.is_authenticated = True
else:
raise AuthenticationFailedException('Authentication failed!')
@authentication_required
def search_series(self, name=None, imdb_id=None, zap2it_id=None):
"""
Searchs for a series in TheTVDB by either its name, imdb_id or zap2it_id.
:param name: the name of the series to look for
:param imdb_id: the IMDB id of the series to look for
:param zap2it_id: the zap2it id of the series to look for.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
arguments = locals()
optional_parameters = {'name': 'name', 'imdb_id': 'imdbId', 'zap2it_id': 'zap2itId'}
query_string = utils.query_param_string_from_option_args(optional_parameters, arguments)
raw_response = requests_util.run_request('get', '%s%s?%s' % (self.API_BASE_URL, '/search/series',
query_string),
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series(self, series_id):
"""
Retrieves the information of a series from TheTVDB given the series ID.
:param series_id: the id of the series on TheTVDB.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d' % series_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series_actors(self, series_id):
"""
Retrieves the information on the actors of a particular series, given its TheTVDB id.
:param series_id: the TheTVDB id of the series
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/actors' % series_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series_episodes(self, series_id, page=1):
"""
Retrieves all episodes for a particular series given its TheTVDB id. It retrieves a maximum of 100 results per
page.
:param series_id: The TheTVDB id of the series.
:param page: The page number. If none is provided, 1 is used by default.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/episodes?page=%d' %
(series_id, page), headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series_episodes(self, series_id, episode_number=None, aired_season=None, aired_episode=None,
dvd_season=None, dvd_episode=None, imdb_id=None, page=1):
"""
Retrieves all episodes for a particular series given its TheTVDB and filtered by additional optional details.
:param series_id: The TheTVDB id of the series.
:param episode_number: The optional absolute episode number.
:param aired_season: The optional aired season number.
:param aired_episode: The optional aired episode number.
:param dvd_season: The optional DVD season number.
:param dvd_episode: The optional DVD episode number.
:param imdb_id: The optional IMDB Id of the series.
:param page: The page number. If none is provided, 1 is used by default.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
arguments = locals()
optional_parameters = {'episode_number': 'absoluteNumber', 'aired_season': 'airedSeason',
'aired_episode': 'airedEpisode', 'dvd_season': 'dvdSeason', 'dvd_episode': 'dvdEpisode',
'imdb_id': 'imdbId', 'page': 'page'}
query_string = utils.query_param_string_from_option_args(optional_parameters, arguments)
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/episodes/query?%s' %
(series_id, query_string), headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series_episodes_summary(self, series_id):
"""
Retrieves the summary of the episodes and seasons of a series given its TheTVDB id.
:param series_id: The TheTVDB id of the series.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/episodes/summary' % series_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def __get_series_images(self, series_id):
"""
Retrieves the url to images (posters, fanart) for the series, seasons and episodes of a series given its
TheTVDB id.
:param series_id: The TheTVDB id of the series.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/images' % series_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series_images(self, series_id, image_type=None, resolution=None, sub_key=None):
"""
Retrieves the url to images (posters, fanart) for the series, seasons and episodes of a series given its
TheTVDB id and filtered by additional parameters.
:param series_id: The TheTVDB id of the series.
:param image_type: The optional type of image: posters, fanart, thumbnail, etc.
:param resolution: The optional resolution: i.e. 1280x1024
:param sub_key: The optional subkey: graphical, text.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
arguments = locals()
optional_parameters = {'image_type': 'keyType', 'resolution': 'resolution', 'sub_key': 'subKey'}
query_string = utils.query_param_string_from_option_args(optional_parameters, arguments)
if len(query_string):
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/images/query?%s' %
(series_id, query_string), headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
else:
return self.__get_series_images(series_id)
@authentication_required
def get_updated(self, from_time, to_time=None):
"""
Retrives a list of series that have changed on TheTVDB since a provided from time parameter and optionally to an
specified to time.
:param from_time: An epoch representation of the date from which to restrict the query to.
:param to_time: An optional epcoh representation of the date to which to restrict the query to.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
arguments = locals()
optional_parameters = {'to_time': 'toTime'}
query_string = 'fromTime=%s&%s' % (from_time,
utils.query_param_string_from_option_args(optional_parameters, arguments))
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/uodated/query?%s' % query_string,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_user(self):
"""
Retrieves information about the user currently using the api.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('get', self.API_BASE_URL + '/user',
headers=self.__get_header_with_auth()))
@authentication_required
def get_user_favorites(self):
"""
Retrieves the list of tv series the current user has flagged as favorite.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('get', self.API_BASE_URL + '/user/favorites',
headers=self.__get_header_with_auth()))
@authentication_required
def delete_user_favorite(self, series_id):
"""
Deletes the series of the provided id from the favorites list of the current user.
:param series_id: The TheTVDB id of the series.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('delete',
self.API_BASE_URL + '/user/favorites/%d' % series_id,
headers=self.__get_header_with_auth()))
@authentication_required
def add_user_favorite(self, series_id):
"""
Added the series related to the series id provided to the list of favorites of the current user.
:param series_id: The TheTVDB id of the series.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('put',
self.API_BASE_URL + '/user/favorites/%d' % series_id,
headers=self.__get_header_with_auth()))
@authentication_required
@authentication_required
def get_user_ratings(self, item_type=None):
"""
Returns a list of the ratings for the type of item provided, for the current user.
:param item_type: One of: series, episode or banner.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
if item_type:
query_string = 'itemType=%s' % item_type
return self.parse_raw_response(
requests_util.run_request('get', self.API_BASE_URL + '/user/ratings/qeury?%s' % query_string,
headers=self.__get_header_with_auth()))
else:
return self.__get_user_ratings()
@authentication_required
def add_user_rating(self, item_type, item_id, item_rating):
"""
Adds the rating for the item indicated for the current user.
:param item_type: One of: series, episode, banner.
:param item_id: The TheTVDB id of the item.
:param item_rating: The rating from 0 to 10.
:return:
"""
raw_response = requests_util.run_request('put',
self.API_BASE_URL + '/user/ratings/%s/%d/%d' %
(item_type, item_id, item_rating),
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def delete_user_rating(self, item_type, item_id):
"""
Deletes from the list of rating of the current user, the rating provided for the specified element type.
:param item_type: One of: series, episode, banner.
:param item_id: The TheTVDB Id of the item.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('delete',
self.API_BASE_URL + '/user/ratings/%s/%d' %
(item_type, item_id), headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_episode(self, episode_id):
"""
Returns the full information of the episode belonging to the Id provided.
:param episode_id: The TheTVDB id of the episode.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/episodes/%d' % episode_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_languages(self):
"""
Returns a list of all language options available in TheTVDB.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/languages',
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_language(self, language_id):
"""
Retrieves information about the language of the given id.
:param language_id: The TheTVDB Id of the language.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/languages/%d' % language_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
|
thilux/tvdb_client | tvdb_client/clients/ApiV2Client.py | ApiV2Client.get_user_ratings | python | def get_user_ratings(self, item_type=None):
if item_type:
query_string = 'itemType=%s' % item_type
return self.parse_raw_response(
requests_util.run_request('get', self.API_BASE_URL + '/user/ratings/qeury?%s' % query_string,
headers=self.__get_header_with_auth()))
else:
return self.__get_user_ratings() | Returns a list of the ratings for the type of item provided, for the current user.
:param item_type: One of: series, episode or banner.
:return: a python dictionary with either the result of the search or an error from TheTVDB. | train | https://github.com/thilux/tvdb_client/blob/2d5106f260367c0abe1284683697874df6343f78/tvdb_client/clients/ApiV2Client.py#L333-L348 | [
"def run_request(request_type, url, retries=5, data=None, headers=None):\n func = __request_factory(request_type)\n\n for attempt in range(retries+1):\n try:\n response = func(url, data=data, headers=headers)\n return response\n except RequestException:\n warnings.warn('Got error on request for attemp %d - %s' %\n (attempt, 'retry is possible' if attempt < retries else 'no retry'))\n return None\n",
"def __get_header_with_auth(self):\n \"\"\"\n This private method returns the HTTP heder filled with the Authorization information with the user token.\n The token validity is monitored whenever this function is called, so according to the swagger page of TheTVDB\n (https://api.thetvdb.com/swagger) the tokens are valid for 24 hours, therefore if a token is generated for over\n 23 hours already, this function will also perform a token refresh using TheTVDB refresh_token API. If over 24\n hours have passed since the token generation, a login is performed to generate a new one, instead.\n\n :return: A python dictionary representing the HTTP header to be used in TheTVDB API calls.\n \"\"\"\n auth_header = self.__get_header()\n auth_header['Authorization'] = 'Bearer %s' % self.__token\n token_renew_time = self.__auth_time + timedelta(seconds=self.TOKEN_DURATION_SECONDS)\n\n if datetime.now() > token_renew_time:\n token_max_time = self.__auth_time + timedelta(seconds=self.TOKEN_MAX_DURATION)\n if datetime.now() < token_max_time:\n self.__refresh_token()\n else:\n self.login()\n\n auth_header['Authorization'] = 'Bearer %s' % self.__token\n\n return auth_header\n",
"def parse_raw_response(self, raw_response):\n\n if raw_response.status_code == 200:\n return json.loads(make_str_content(raw_response.content))\n else:\n return self.__handle_error(raw_response)\n",
"def inner(instance, *args, **kwargs):\n if not instance.is_authenticated:\n raise UserNotLoggedInException('Authentication is required!')\n else:\n return func(instance, *args, **kwargs)\n"
] | class ApiV2Client(BaseClient):
"""
This is the python library implementation of the TheTVDB API V2. Details of the APIs is documented in the swagger
page maintained by TheTVDB that can be found on this address: https://api.thetvdb.com/swagger
The only APIs not represented by methods in this library are the ones that only returns parameters information of
the actual API.
"""
API_BASE_URL = 'https://api.thetvdb.com'
TOKEN_DURATION_SECONDS = 23 * 3600 # 23 Hours
TOKEN_MAX_DURATION = 24 * 3600 # 24 Hours
def __init__(self, username, api_key, account_identifier, language=None):
self.username = username
self.api_key = api_key
self.account_identifier = account_identifier
self.is_authenticated = False
self.__token = None
self.__auth_time = 0
self.language = language
def __get_header(self):
header = dict()
header['Content-Type'] = 'application/json'
header['User-Agent'] = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:47.0) Gecko/20100101 Firefox/47.0'
if self.language:
header['Accept-Language'] = self.language
return header
def __refresh_token(self):
headers = self.__get_header()
headers['Authorization'] = 'Bearer %s' % self.__token
resp = requests_util.run_request('get', self.API_BASE_URL + '/refresh_token', headers=headers)
if resp.status_code == 200:
token_resp = self.parse_raw_response(resp)
self.__token = token_resp['token']
self.__auth_time = datetime.now()
def __get_header_with_auth(self):
"""
This private method returns the HTTP heder filled with the Authorization information with the user token.
The token validity is monitored whenever this function is called, so according to the swagger page of TheTVDB
(https://api.thetvdb.com/swagger) the tokens are valid for 24 hours, therefore if a token is generated for over
23 hours already, this function will also perform a token refresh using TheTVDB refresh_token API. If over 24
hours have passed since the token generation, a login is performed to generate a new one, instead.
:return: A python dictionary representing the HTTP header to be used in TheTVDB API calls.
"""
auth_header = self.__get_header()
auth_header['Authorization'] = 'Bearer %s' % self.__token
token_renew_time = self.__auth_time + timedelta(seconds=self.TOKEN_DURATION_SECONDS)
if datetime.now() > token_renew_time:
token_max_time = self.__auth_time + timedelta(seconds=self.TOKEN_MAX_DURATION)
if datetime.now() < token_max_time:
self.__refresh_token()
else:
self.login()
auth_header['Authorization'] = 'Bearer %s' % self.__token
return auth_header
def login(self):
"""
This method performs the login on TheTVDB given the api key, user name and account identifier.
:return: None
"""
auth_data = dict()
auth_data['apikey'] = self.api_key
auth_data['username'] = self.username
auth_data['userkey'] = self.account_identifier
auth_resp = requests_util.run_request('post', self.API_BASE_URL + '/login', data=json.dumps(auth_data),
headers=self.__get_header())
if auth_resp.status_code == 200:
auth_resp_data = self.parse_raw_response(auth_resp)
self.__token = auth_resp_data['token']
self.__auth_time = datetime.now()
self.is_authenticated = True
else:
raise AuthenticationFailedException('Authentication failed!')
@authentication_required
def search_series(self, name=None, imdb_id=None, zap2it_id=None):
"""
Searchs for a series in TheTVDB by either its name, imdb_id or zap2it_id.
:param name: the name of the series to look for
:param imdb_id: the IMDB id of the series to look for
:param zap2it_id: the zap2it id of the series to look for.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
arguments = locals()
optional_parameters = {'name': 'name', 'imdb_id': 'imdbId', 'zap2it_id': 'zap2itId'}
query_string = utils.query_param_string_from_option_args(optional_parameters, arguments)
raw_response = requests_util.run_request('get', '%s%s?%s' % (self.API_BASE_URL, '/search/series',
query_string),
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series(self, series_id):
"""
Retrieves the information of a series from TheTVDB given the series ID.
:param series_id: the id of the series on TheTVDB.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d' % series_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series_actors(self, series_id):
"""
Retrieves the information on the actors of a particular series, given its TheTVDB id.
:param series_id: the TheTVDB id of the series
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/actors' % series_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series_episodes(self, series_id, page=1):
"""
Retrieves all episodes for a particular series given its TheTVDB id. It retrieves a maximum of 100 results per
page.
:param series_id: The TheTVDB id of the series.
:param page: The page number. If none is provided, 1 is used by default.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/episodes?page=%d' %
(series_id, page), headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series_episodes(self, series_id, episode_number=None, aired_season=None, aired_episode=None,
dvd_season=None, dvd_episode=None, imdb_id=None, page=1):
"""
Retrieves all episodes for a particular series given its TheTVDB and filtered by additional optional details.
:param series_id: The TheTVDB id of the series.
:param episode_number: The optional absolute episode number.
:param aired_season: The optional aired season number.
:param aired_episode: The optional aired episode number.
:param dvd_season: The optional DVD season number.
:param dvd_episode: The optional DVD episode number.
:param imdb_id: The optional IMDB Id of the series.
:param page: The page number. If none is provided, 1 is used by default.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
arguments = locals()
optional_parameters = {'episode_number': 'absoluteNumber', 'aired_season': 'airedSeason',
'aired_episode': 'airedEpisode', 'dvd_season': 'dvdSeason', 'dvd_episode': 'dvdEpisode',
'imdb_id': 'imdbId', 'page': 'page'}
query_string = utils.query_param_string_from_option_args(optional_parameters, arguments)
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/episodes/query?%s' %
(series_id, query_string), headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series_episodes_summary(self, series_id):
"""
Retrieves the summary of the episodes and seasons of a series given its TheTVDB id.
:param series_id: The TheTVDB id of the series.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/episodes/summary' % series_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def __get_series_images(self, series_id):
"""
Retrieves the url to images (posters, fanart) for the series, seasons and episodes of a series given its
TheTVDB id.
:param series_id: The TheTVDB id of the series.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/images' % series_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series_images(self, series_id, image_type=None, resolution=None, sub_key=None):
"""
Retrieves the url to images (posters, fanart) for the series, seasons and episodes of a series given its
TheTVDB id and filtered by additional parameters.
:param series_id: The TheTVDB id of the series.
:param image_type: The optional type of image: posters, fanart, thumbnail, etc.
:param resolution: The optional resolution: i.e. 1280x1024
:param sub_key: The optional subkey: graphical, text.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
arguments = locals()
optional_parameters = {'image_type': 'keyType', 'resolution': 'resolution', 'sub_key': 'subKey'}
query_string = utils.query_param_string_from_option_args(optional_parameters, arguments)
if len(query_string):
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/images/query?%s' %
(series_id, query_string), headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
else:
return self.__get_series_images(series_id)
@authentication_required
def get_updated(self, from_time, to_time=None):
"""
        Retrieves a list of series that have changed on TheTVDB since a provided from time parameter and optionally to a
specified to time.
        :param from_time: An epoch representation of the date from which to restrict the query.
        :param to_time: An optional epoch representation of the date to which to restrict the query.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
arguments = locals()
optional_parameters = {'to_time': 'toTime'}
query_string = 'fromTime=%s&%s' % (from_time,
utils.query_param_string_from_option_args(optional_parameters, arguments))
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/uodated/query?%s' % query_string,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_user(self):
"""
Retrieves information about the user currently using the api.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('get', self.API_BASE_URL + '/user',
headers=self.__get_header_with_auth()))
@authentication_required
def get_user_favorites(self):
"""
Retrieves the list of tv series the current user has flagged as favorite.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('get', self.API_BASE_URL + '/user/favorites',
headers=self.__get_header_with_auth()))
@authentication_required
def delete_user_favorite(self, series_id):
"""
Deletes the series of the provided id from the favorites list of the current user.
:param series_id: The TheTVDB id of the series.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('delete',
self.API_BASE_URL + '/user/favorites/%d' % series_id,
headers=self.__get_header_with_auth()))
@authentication_required
def add_user_favorite(self, series_id):
"""
        Adds the series related to the series id provided to the list of favorites of the current user.
:param series_id: The TheTVDB id of the series.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('put',
self.API_BASE_URL + '/user/favorites/%d' % series_id,
headers=self.__get_header_with_auth()))
@authentication_required
def __get_user_ratings(self):
"""
Returns a list of the ratings provided by the current user.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('get', self.API_BASE_URL + '/user/ratings',
headers=self.__get_header_with_auth()))
@authentication_required
@authentication_required
def add_user_rating(self, item_type, item_id, item_rating):
"""
Adds the rating for the item indicated for the current user.
:param item_type: One of: series, episode, banner.
:param item_id: The TheTVDB id of the item.
:param item_rating: The rating from 0 to 10.
:return:
"""
raw_response = requests_util.run_request('put',
self.API_BASE_URL + '/user/ratings/%s/%d/%d' %
(item_type, item_id, item_rating),
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def delete_user_rating(self, item_type, item_id):
"""
        Deletes from the list of ratings of the current user, the rating provided for the specified element type.
:param item_type: One of: series, episode, banner.
:param item_id: The TheTVDB Id of the item.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('delete',
self.API_BASE_URL + '/user/ratings/%s/%d' %
(item_type, item_id), headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_episode(self, episode_id):
"""
Returns the full information of the episode belonging to the Id provided.
:param episode_id: The TheTVDB id of the episode.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/episodes/%d' % episode_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_languages(self):
"""
Returns a list of all language options available in TheTVDB.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/languages',
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_language(self, language_id):
"""
Retrieves information about the language of the given id.
:param language_id: The TheTVDB Id of the language.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/languages/%d' % language_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
|
thilux/tvdb_client | tvdb_client/clients/ApiV2Client.py | ApiV2Client.add_user_rating | python | def add_user_rating(self, item_type, item_id, item_rating):
raw_response = requests_util.run_request('put',
self.API_BASE_URL + '/user/ratings/%s/%d/%d' %
(item_type, item_id, item_rating),
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response) | Adds the rating for the item indicated for the current user.
:param item_type: One of: series, episode, banner.
:param item_id: The TheTVDB id of the item.
:param item_rating: The rating from 0 to 10.
:return: | train | https://github.com/thilux/tvdb_client/blob/2d5106f260367c0abe1284683697874df6343f78/tvdb_client/clients/ApiV2Client.py#L351-L366 | [
"def run_request(request_type, url, retries=5, data=None, headers=None):\n func = __request_factory(request_type)\n\n for attempt in range(retries+1):\n try:\n response = func(url, data=data, headers=headers)\n return response\n except RequestException:\n warnings.warn('Got error on request for attemp %d - %s' %\n (attempt, 'retry is possible' if attempt < retries else 'no retry'))\n return None\n",
"def __get_header_with_auth(self):\n \"\"\"\n This private method returns the HTTP heder filled with the Authorization information with the user token.\n The token validity is monitored whenever this function is called, so according to the swagger page of TheTVDB\n (https://api.thetvdb.com/swagger) the tokens are valid for 24 hours, therefore if a token is generated for over\n 23 hours already, this function will also perform a token refresh using TheTVDB refresh_token API. If over 24\n hours have passed since the token generation, a login is performed to generate a new one, instead.\n\n :return: A python dictionary representing the HTTP header to be used in TheTVDB API calls.\n \"\"\"\n auth_header = self.__get_header()\n auth_header['Authorization'] = 'Bearer %s' % self.__token\n token_renew_time = self.__auth_time + timedelta(seconds=self.TOKEN_DURATION_SECONDS)\n\n if datetime.now() > token_renew_time:\n token_max_time = self.__auth_time + timedelta(seconds=self.TOKEN_MAX_DURATION)\n if datetime.now() < token_max_time:\n self.__refresh_token()\n else:\n self.login()\n\n auth_header['Authorization'] = 'Bearer %s' % self.__token\n\n return auth_header\n",
"def parse_raw_response(self, raw_response):\n\n if raw_response.status_code == 200:\n return json.loads(make_str_content(raw_response.content))\n else:\n return self.__handle_error(raw_response)\n"
] | class ApiV2Client(BaseClient):
"""
This is the python library implementation of the TheTVDB API V2. Details of the APIs is documented in the swagger
page maintained by TheTVDB that can be found on this address: https://api.thetvdb.com/swagger
The only APIs not represented by methods in this library are the ones that only returns parameters information of
the actual API.
"""
API_BASE_URL = 'https://api.thetvdb.com'
TOKEN_DURATION_SECONDS = 23 * 3600 # 23 Hours
TOKEN_MAX_DURATION = 24 * 3600 # 24 Hours
def __init__(self, username, api_key, account_identifier, language=None):
self.username = username
self.api_key = api_key
self.account_identifier = account_identifier
self.is_authenticated = False
self.__token = None
self.__auth_time = 0
self.language = language
def __get_header(self):
header = dict()
header['Content-Type'] = 'application/json'
header['User-Agent'] = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:47.0) Gecko/20100101 Firefox/47.0'
if self.language:
header['Accept-Language'] = self.language
return header
def __refresh_token(self):
headers = self.__get_header()
headers['Authorization'] = 'Bearer %s' % self.__token
resp = requests_util.run_request('get', self.API_BASE_URL + '/refresh_token', headers=headers)
if resp.status_code == 200:
token_resp = self.parse_raw_response(resp)
self.__token = token_resp['token']
self.__auth_time = datetime.now()
def __get_header_with_auth(self):
"""
        This private method returns the HTTP header filled with the Authorization information with the user token.
The token validity is monitored whenever this function is called, so according to the swagger page of TheTVDB
(https://api.thetvdb.com/swagger) the tokens are valid for 24 hours, therefore if a token is generated for over
23 hours already, this function will also perform a token refresh using TheTVDB refresh_token API. If over 24
hours have passed since the token generation, a login is performed to generate a new one, instead.
:return: A python dictionary representing the HTTP header to be used in TheTVDB API calls.
"""
auth_header = self.__get_header()
auth_header['Authorization'] = 'Bearer %s' % self.__token
token_renew_time = self.__auth_time + timedelta(seconds=self.TOKEN_DURATION_SECONDS)
if datetime.now() > token_renew_time:
token_max_time = self.__auth_time + timedelta(seconds=self.TOKEN_MAX_DURATION)
if datetime.now() < token_max_time:
self.__refresh_token()
else:
self.login()
auth_header['Authorization'] = 'Bearer %s' % self.__token
return auth_header
def login(self):
"""
This method performs the login on TheTVDB given the api key, user name and account identifier.
:return: None
"""
auth_data = dict()
auth_data['apikey'] = self.api_key
auth_data['username'] = self.username
auth_data['userkey'] = self.account_identifier
auth_resp = requests_util.run_request('post', self.API_BASE_URL + '/login', data=json.dumps(auth_data),
headers=self.__get_header())
if auth_resp.status_code == 200:
auth_resp_data = self.parse_raw_response(auth_resp)
self.__token = auth_resp_data['token']
self.__auth_time = datetime.now()
self.is_authenticated = True
else:
raise AuthenticationFailedException('Authentication failed!')
@authentication_required
def search_series(self, name=None, imdb_id=None, zap2it_id=None):
"""
        Searches for a series in TheTVDB by either its name, imdb_id or zap2it_id.
:param name: the name of the series to look for
:param imdb_id: the IMDB id of the series to look for
:param zap2it_id: the zap2it id of the series to look for.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
arguments = locals()
optional_parameters = {'name': 'name', 'imdb_id': 'imdbId', 'zap2it_id': 'zap2itId'}
query_string = utils.query_param_string_from_option_args(optional_parameters, arguments)
raw_response = requests_util.run_request('get', '%s%s?%s' % (self.API_BASE_URL, '/search/series',
query_string),
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series(self, series_id):
"""
Retrieves the information of a series from TheTVDB given the series ID.
:param series_id: the id of the series on TheTVDB.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d' % series_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series_actors(self, series_id):
"""
Retrieves the information on the actors of a particular series, given its TheTVDB id.
:param series_id: the TheTVDB id of the series
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/actors' % series_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series_episodes(self, series_id, page=1):
"""
Retrieves all episodes for a particular series given its TheTVDB id. It retrieves a maximum of 100 results per
page.
:param series_id: The TheTVDB id of the series.
:param page: The page number. If none is provided, 1 is used by default.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/episodes?page=%d' %
(series_id, page), headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series_episodes(self, series_id, episode_number=None, aired_season=None, aired_episode=None,
dvd_season=None, dvd_episode=None, imdb_id=None, page=1):
"""
Retrieves all episodes for a particular series given its TheTVDB and filtered by additional optional details.
:param series_id: The TheTVDB id of the series.
:param episode_number: The optional absolute episode number.
:param aired_season: The optional aired season number.
:param aired_episode: The optional aired episode number.
:param dvd_season: The optional DVD season number.
:param dvd_episode: The optional DVD episode number.
:param imdb_id: The optional IMDB Id of the series.
:param page: The page number. If none is provided, 1 is used by default.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
arguments = locals()
optional_parameters = {'episode_number': 'absoluteNumber', 'aired_season': 'airedSeason',
'aired_episode': 'airedEpisode', 'dvd_season': 'dvdSeason', 'dvd_episode': 'dvdEpisode',
'imdb_id': 'imdbId', 'page': 'page'}
query_string = utils.query_param_string_from_option_args(optional_parameters, arguments)
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/episodes/query?%s' %
(series_id, query_string), headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series_episodes_summary(self, series_id):
"""
Retrieves the summary of the episodes and seasons of a series given its TheTVDB id.
:param series_id: The TheTVDB id of the series.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/episodes/summary' % series_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def __get_series_images(self, series_id):
"""
Retrieves the url to images (posters, fanart) for the series, seasons and episodes of a series given its
TheTVDB id.
:param series_id: The TheTVDB id of the series.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/images' % series_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series_images(self, series_id, image_type=None, resolution=None, sub_key=None):
"""
Retrieves the url to images (posters, fanart) for the series, seasons and episodes of a series given its
TheTVDB id and filtered by additional parameters.
:param series_id: The TheTVDB id of the series.
:param image_type: The optional type of image: posters, fanart, thumbnail, etc.
:param resolution: The optional resolution: i.e. 1280x1024
:param sub_key: The optional subkey: graphical, text.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
arguments = locals()
optional_parameters = {'image_type': 'keyType', 'resolution': 'resolution', 'sub_key': 'subKey'}
query_string = utils.query_param_string_from_option_args(optional_parameters, arguments)
if len(query_string):
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/images/query?%s' %
(series_id, query_string), headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
else:
return self.__get_series_images(series_id)
@authentication_required
def get_updated(self, from_time, to_time=None):
"""
        Retrieves a list of series that have changed on TheTVDB since a provided from time parameter and optionally to a
specified to time.
        :param from_time: An epoch representation of the date from which to restrict the query.
        :param to_time: An optional epoch representation of the date to which to restrict the query.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
arguments = locals()
optional_parameters = {'to_time': 'toTime'}
query_string = 'fromTime=%s&%s' % (from_time,
utils.query_param_string_from_option_args(optional_parameters, arguments))
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/uodated/query?%s' % query_string,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_user(self):
"""
Retrieves information about the user currently using the api.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('get', self.API_BASE_URL + '/user',
headers=self.__get_header_with_auth()))
@authentication_required
def get_user_favorites(self):
"""
Retrieves the list of tv series the current user has flagged as favorite.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('get', self.API_BASE_URL + '/user/favorites',
headers=self.__get_header_with_auth()))
@authentication_required
def delete_user_favorite(self, series_id):
"""
Deletes the series of the provided id from the favorites list of the current user.
:param series_id: The TheTVDB id of the series.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('delete',
self.API_BASE_URL + '/user/favorites/%d' % series_id,
headers=self.__get_header_with_auth()))
@authentication_required
def add_user_favorite(self, series_id):
"""
        Adds the series related to the series id provided to the list of favorites of the current user.
:param series_id: The TheTVDB id of the series.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('put',
self.API_BASE_URL + '/user/favorites/%d' % series_id,
headers=self.__get_header_with_auth()))
@authentication_required
def __get_user_ratings(self):
"""
Returns a list of the ratings provided by the current user.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('get', self.API_BASE_URL + '/user/ratings',
headers=self.__get_header_with_auth()))
@authentication_required
def get_user_ratings(self, item_type=None):
"""
Returns a list of the ratings for the type of item provided, for the current user.
:param item_type: One of: series, episode or banner.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
if item_type:
query_string = 'itemType=%s' % item_type
return self.parse_raw_response(
requests_util.run_request('get', self.API_BASE_URL + '/user/ratings/qeury?%s' % query_string,
headers=self.__get_header_with_auth()))
else:
return self.__get_user_ratings()
@authentication_required
@authentication_required
def delete_user_rating(self, item_type, item_id):
"""
        Deletes from the list of ratings of the current user, the rating provided for the specified element type.
:param item_type: One of: series, episode, banner.
:param item_id: The TheTVDB Id of the item.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('delete',
self.API_BASE_URL + '/user/ratings/%s/%d' %
(item_type, item_id), headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_episode(self, episode_id):
"""
Returns the full information of the episode belonging to the Id provided.
:param episode_id: The TheTVDB id of the episode.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/episodes/%d' % episode_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_languages(self):
"""
Returns a list of all language options available in TheTVDB.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/languages',
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_language(self, language_id):
"""
Retrieves information about the language of the given id.
:param language_id: The TheTVDB Id of the language.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/languages/%d' % language_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
|
thilux/tvdb_client | tvdb_client/clients/ApiV2Client.py | ApiV2Client.delete_user_rating | python | def delete_user_rating(self, item_type, item_id):
raw_response = requests_util.run_request('delete',
self.API_BASE_URL + '/user/ratings/%s/%d' %
(item_type, item_id), headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response) | Deletes from the list of rating of the current user, the rating provided for the specified element type.
:param item_type: One of: series, episode, banner.
:param item_id: The TheTVDB Id of the item.
:return: a python dictionary with either the result of the search or an error from TheTVDB. | train | https://github.com/thilux/tvdb_client/blob/2d5106f260367c0abe1284683697874df6343f78/tvdb_client/clients/ApiV2Client.py#L369-L382 | [
"def run_request(request_type, url, retries=5, data=None, headers=None):\n func = __request_factory(request_type)\n\n for attempt in range(retries+1):\n try:\n response = func(url, data=data, headers=headers)\n return response\n except RequestException:\n warnings.warn('Got error on request for attemp %d - %s' %\n (attempt, 'retry is possible' if attempt < retries else 'no retry'))\n return None\n",
"def __get_header_with_auth(self):\n \"\"\"\n This private method returns the HTTP heder filled with the Authorization information with the user token.\n The token validity is monitored whenever this function is called, so according to the swagger page of TheTVDB\n (https://api.thetvdb.com/swagger) the tokens are valid for 24 hours, therefore if a token is generated for over\n 23 hours already, this function will also perform a token refresh using TheTVDB refresh_token API. If over 24\n hours have passed since the token generation, a login is performed to generate a new one, instead.\n\n :return: A python dictionary representing the HTTP header to be used in TheTVDB API calls.\n \"\"\"\n auth_header = self.__get_header()\n auth_header['Authorization'] = 'Bearer %s' % self.__token\n token_renew_time = self.__auth_time + timedelta(seconds=self.TOKEN_DURATION_SECONDS)\n\n if datetime.now() > token_renew_time:\n token_max_time = self.__auth_time + timedelta(seconds=self.TOKEN_MAX_DURATION)\n if datetime.now() < token_max_time:\n self.__refresh_token()\n else:\n self.login()\n\n auth_header['Authorization'] = 'Bearer %s' % self.__token\n\n return auth_header\n",
"def parse_raw_response(self, raw_response):\n\n if raw_response.status_code == 200:\n return json.loads(make_str_content(raw_response.content))\n else:\n return self.__handle_error(raw_response)\n"
] | class ApiV2Client(BaseClient):
"""
This is the python library implementation of the TheTVDB API V2. Details of the APIs is documented in the swagger
page maintained by TheTVDB that can be found on this address: https://api.thetvdb.com/swagger
The only APIs not represented by methods in this library are the ones that only returns parameters information of
the actual API.
"""
API_BASE_URL = 'https://api.thetvdb.com'
TOKEN_DURATION_SECONDS = 23 * 3600 # 23 Hours
TOKEN_MAX_DURATION = 24 * 3600 # 24 Hours
def __init__(self, username, api_key, account_identifier, language=None):
self.username = username
self.api_key = api_key
self.account_identifier = account_identifier
self.is_authenticated = False
self.__token = None
self.__auth_time = 0
self.language = language
def __get_header(self):
header = dict()
header['Content-Type'] = 'application/json'
header['User-Agent'] = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:47.0) Gecko/20100101 Firefox/47.0'
if self.language:
header['Accept-Language'] = self.language
return header
def __refresh_token(self):
headers = self.__get_header()
headers['Authorization'] = 'Bearer %s' % self.__token
resp = requests_util.run_request('get', self.API_BASE_URL + '/refresh_token', headers=headers)
if resp.status_code == 200:
token_resp = self.parse_raw_response(resp)
self.__token = token_resp['token']
self.__auth_time = datetime.now()
def __get_header_with_auth(self):
"""
        This private method returns the HTTP header filled with the Authorization information with the user token.
The token validity is monitored whenever this function is called, so according to the swagger page of TheTVDB
(https://api.thetvdb.com/swagger) the tokens are valid for 24 hours, therefore if a token is generated for over
23 hours already, this function will also perform a token refresh using TheTVDB refresh_token API. If over 24
hours have passed since the token generation, a login is performed to generate a new one, instead.
:return: A python dictionary representing the HTTP header to be used in TheTVDB API calls.
"""
auth_header = self.__get_header()
auth_header['Authorization'] = 'Bearer %s' % self.__token
token_renew_time = self.__auth_time + timedelta(seconds=self.TOKEN_DURATION_SECONDS)
if datetime.now() > token_renew_time:
token_max_time = self.__auth_time + timedelta(seconds=self.TOKEN_MAX_DURATION)
if datetime.now() < token_max_time:
self.__refresh_token()
else:
self.login()
auth_header['Authorization'] = 'Bearer %s' % self.__token
return auth_header
def login(self):
"""
This method performs the login on TheTVDB given the api key, user name and account identifier.
:return: None
"""
auth_data = dict()
auth_data['apikey'] = self.api_key
auth_data['username'] = self.username
auth_data['userkey'] = self.account_identifier
auth_resp = requests_util.run_request('post', self.API_BASE_URL + '/login', data=json.dumps(auth_data),
headers=self.__get_header())
if auth_resp.status_code == 200:
auth_resp_data = self.parse_raw_response(auth_resp)
self.__token = auth_resp_data['token']
self.__auth_time = datetime.now()
self.is_authenticated = True
else:
raise AuthenticationFailedException('Authentication failed!')
@authentication_required
def search_series(self, name=None, imdb_id=None, zap2it_id=None):
"""
        Searches for a series in TheTVDB by either its name, imdb_id or zap2it_id.
:param name: the name of the series to look for
:param imdb_id: the IMDB id of the series to look for
:param zap2it_id: the zap2it id of the series to look for.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
arguments = locals()
optional_parameters = {'name': 'name', 'imdb_id': 'imdbId', 'zap2it_id': 'zap2itId'}
query_string = utils.query_param_string_from_option_args(optional_parameters, arguments)
raw_response = requests_util.run_request('get', '%s%s?%s' % (self.API_BASE_URL, '/search/series',
query_string),
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series(self, series_id):
"""
Retrieves the information of a series from TheTVDB given the series ID.
:param series_id: the id of the series on TheTVDB.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d' % series_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series_actors(self, series_id):
"""
Retrieves the information on the actors of a particular series, given its TheTVDB id.
:param series_id: the TheTVDB id of the series
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/actors' % series_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series_episodes(self, series_id, page=1):
"""
Retrieves all episodes for a particular series given its TheTVDB id. It retrieves a maximum of 100 results per
page.
:param series_id: The TheTVDB id of the series.
:param page: The page number. If none is provided, 1 is used by default.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/episodes?page=%d' %
(series_id, page), headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series_episodes(self, series_id, episode_number=None, aired_season=None, aired_episode=None,
dvd_season=None, dvd_episode=None, imdb_id=None, page=1):
"""
Retrieves all episodes for a particular series given its TheTVDB and filtered by additional optional details.
:param series_id: The TheTVDB id of the series.
:param episode_number: The optional absolute episode number.
:param aired_season: The optional aired season number.
:param aired_episode: The optional aired episode number.
:param dvd_season: The optional DVD season number.
:param dvd_episode: The optional DVD episode number.
:param imdb_id: The optional IMDB Id of the series.
:param page: The page number. If none is provided, 1 is used by default.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
arguments = locals()
optional_parameters = {'episode_number': 'absoluteNumber', 'aired_season': 'airedSeason',
'aired_episode': 'airedEpisode', 'dvd_season': 'dvdSeason', 'dvd_episode': 'dvdEpisode',
'imdb_id': 'imdbId', 'page': 'page'}
query_string = utils.query_param_string_from_option_args(optional_parameters, arguments)
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/episodes/query?%s' %
(series_id, query_string), headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series_episodes_summary(self, series_id):
"""
Retrieves the summary of the episodes and seasons of a series given its TheTVDB id.
:param series_id: The TheTVDB id of the series.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/episodes/summary' % series_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def __get_series_images(self, series_id):
"""
Retrieves the url to images (posters, fanart) for the series, seasons and episodes of a series given its
TheTVDB id.
:param series_id: The TheTVDB id of the series.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/images' % series_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series_images(self, series_id, image_type=None, resolution=None, sub_key=None):
"""
Retrieves the url to images (posters, fanart) for the series, seasons and episodes of a series given its
TheTVDB id and filtered by additional parameters.
:param series_id: The TheTVDB id of the series.
:param image_type: The optional type of image: posters, fanart, thumbnail, etc.
:param resolution: The optional resolution: i.e. 1280x1024
:param sub_key: The optional subkey: graphical, text.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
arguments = locals()
optional_parameters = {'image_type': 'keyType', 'resolution': 'resolution', 'sub_key': 'subKey'}
query_string = utils.query_param_string_from_option_args(optional_parameters, arguments)
if len(query_string):
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/images/query?%s' %
(series_id, query_string), headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
else:
return self.__get_series_images(series_id)
@authentication_required
def get_updated(self, from_time, to_time=None):
"""
Retrieves a list of series that have changed on TheTVDB since a provided from time parameter and optionally to a
specified to time.
:param from_time: An epoch representation of the date from which to restrict the query to.
:param to_time: An optional epoch representation of the date to which to restrict the query to.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
arguments = locals()
optional_parameters = {'to_time': 'toTime'}
query_string = 'fromTime=%s&%s' % (from_time,
utils.query_param_string_from_option_args(optional_parameters, arguments))
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/uodated/query?%s' % query_string,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_user(self):
"""
Retrieves information about the user currently using the api.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('get', self.API_BASE_URL + '/user',
headers=self.__get_header_with_auth()))
@authentication_required
def get_user_favorites(self):
"""
Retrieves the list of tv series the current user has flagged as favorite.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('get', self.API_BASE_URL + '/user/favorites',
headers=self.__get_header_with_auth()))
@authentication_required
def delete_user_favorite(self, series_id):
"""
Deletes the series of the provided id from the favorites list of the current user.
:param series_id: The TheTVDB id of the series.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('delete',
self.API_BASE_URL + '/user/favorites/%d' % series_id,
headers=self.__get_header_with_auth()))
@authentication_required
def add_user_favorite(self, series_id):
"""
Adds the series related to the series id provided to the list of favorites of the current user.
:param series_id: The TheTVDB id of the series.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('put',
self.API_BASE_URL + '/user/favorites/%d' % series_id,
headers=self.__get_header_with_auth()))
@authentication_required
def __get_user_ratings(self):
"""
Returns a list of the ratings provided by the current user.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('get', self.API_BASE_URL + '/user/ratings',
headers=self.__get_header_with_auth()))
@authentication_required
def get_user_ratings(self, item_type=None):
"""
Returns a list of the ratings for the type of item provided, for the current user.
:param item_type: One of: series, episode or banner.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
if item_type:
query_string = 'itemType=%s' % item_type
return self.parse_raw_response(
requests_util.run_request('get', self.API_BASE_URL + '/user/ratings/qeury?%s' % query_string,
headers=self.__get_header_with_auth()))
else:
return self.__get_user_ratings()
@authentication_required
def add_user_rating(self, item_type, item_id, item_rating):
"""
Adds the rating for the item indicated for the current user.
:param item_type: One of: series, episode, banner.
:param item_id: The TheTVDB id of the item.
:param item_rating: The rating from 0 to 10.
:return:
"""
raw_response = requests_util.run_request('put',
self.API_BASE_URL + '/user/ratings/%s/%d/%d' %
(item_type, item_id, item_rating),
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
@authentication_required
def get_episode(self, episode_id):
"""
Returns the full information of the episode belonging to the Id provided.
:param episode_id: The TheTVDB id of the episode.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/episodes/%d' % episode_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_languages(self):
"""
Returns a list of all language options available in TheTVDB.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/languages',
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_language(self, language_id):
"""
Retrieves information about the language of the given id.
:param language_id: The TheTVDB Id of the language.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/languages/%d' % language_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
|
thilux/tvdb_client | tvdb_client/clients/ApiV2Client.py | ApiV2Client.get_episode | python | def get_episode(self, episode_id):
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/episodes/%d' % episode_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response) | Returns the full information of the episode belonging to the Id provided.
:param episode_id: The TheTVDB id of the episode.
:return: a python dictionary with either the result of the search or an error from TheTVDB. | train | https://github.com/thilux/tvdb_client/blob/2d5106f260367c0abe1284683697874df6343f78/tvdb_client/clients/ApiV2Client.py#L385-L396 | [
"def run_request(request_type, url, retries=5, data=None, headers=None):\n func = __request_factory(request_type)\n\n for attempt in range(retries+1):\n try:\n response = func(url, data=data, headers=headers)\n return response\n except RequestException:\n warnings.warn('Got error on request for attemp %d - %s' %\n (attempt, 'retry is possible' if attempt < retries else 'no retry'))\n return None\n",
"def __get_header_with_auth(self):\n \"\"\"\n This private method returns the HTTP heder filled with the Authorization information with the user token.\n The token validity is monitored whenever this function is called, so according to the swagger page of TheTVDB\n (https://api.thetvdb.com/swagger) the tokens are valid for 24 hours, therefore if a token is generated for over\n 23 hours already, this function will also perform a token refresh using TheTVDB refresh_token API. If over 24\n hours have passed since the token generation, a login is performed to generate a new one, instead.\n\n :return: A python dictionary representing the HTTP header to be used in TheTVDB API calls.\n \"\"\"\n auth_header = self.__get_header()\n auth_header['Authorization'] = 'Bearer %s' % self.__token\n token_renew_time = self.__auth_time + timedelta(seconds=self.TOKEN_DURATION_SECONDS)\n\n if datetime.now() > token_renew_time:\n token_max_time = self.__auth_time + timedelta(seconds=self.TOKEN_MAX_DURATION)\n if datetime.now() < token_max_time:\n self.__refresh_token()\n else:\n self.login()\n\n auth_header['Authorization'] = 'Bearer %s' % self.__token\n\n return auth_header\n",
"def parse_raw_response(self, raw_response):\n\n if raw_response.status_code == 200:\n return json.loads(make_str_content(raw_response.content))\n else:\n return self.__handle_error(raw_response)\n"
] | class ApiV2Client(BaseClient):
"""
This is the python library implementation of the TheTVDB API V2. Details of the APIs is documented in the swagger
page maintained by TheTVDB that can be found on this address: https://api.thetvdb.com/swagger
The only APIs not represented by methods in this library are the ones that only returns parameters information of
the actual API.
"""
API_BASE_URL = 'https://api.thetvdb.com'
TOKEN_DURATION_SECONDS = 23 * 3600 # 23 Hours
TOKEN_MAX_DURATION = 24 * 3600 # 24 Hours
def __init__(self, username, api_key, account_identifier, language=None):
self.username = username
self.api_key = api_key
self.account_identifier = account_identifier
self.is_authenticated = False
self.__token = None
self.__auth_time = 0
self.language = language
def __get_header(self):
header = dict()
header['Content-Type'] = 'application/json'
header['User-Agent'] = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:47.0) Gecko/20100101 Firefox/47.0'
if self.language:
header['Accept-Language'] = self.language
return header
def __refresh_token(self):
headers = self.__get_header()
headers['Authorization'] = 'Bearer %s' % self.__token
resp = requests_util.run_request('get', self.API_BASE_URL + '/refresh_token', headers=headers)
if resp.status_code == 200:
token_resp = self.parse_raw_response(resp)
self.__token = token_resp['token']
self.__auth_time = datetime.now()
def __get_header_with_auth(self):
"""
This private method returns the HTTP header filled with the Authorization information with the user token.
The token validity is monitored whenever this function is called, so according to the swagger page of TheTVDB
(https://api.thetvdb.com/swagger) the tokens are valid for 24 hours, therefore if a token is generated for over
23 hours already, this function will also perform a token refresh using TheTVDB refresh_token API. If over 24
hours have passed since the token generation, a login is performed to generate a new one, instead.
:return: A python dictionary representing the HTTP header to be used in TheTVDB API calls.
"""
auth_header = self.__get_header()
auth_header['Authorization'] = 'Bearer %s' % self.__token
token_renew_time = self.__auth_time + timedelta(seconds=self.TOKEN_DURATION_SECONDS)
if datetime.now() > token_renew_time:
token_max_time = self.__auth_time + timedelta(seconds=self.TOKEN_MAX_DURATION)
if datetime.now() < token_max_time:
self.__refresh_token()
else:
self.login()
auth_header['Authorization'] = 'Bearer %s' % self.__token
return auth_header
def login(self):
"""
This method performs the login on TheTVDB given the api key, user name and account identifier.
:return: None
"""
auth_data = dict()
auth_data['apikey'] = self.api_key
auth_data['username'] = self.username
auth_data['userkey'] = self.account_identifier
auth_resp = requests_util.run_request('post', self.API_BASE_URL + '/login', data=json.dumps(auth_data),
headers=self.__get_header())
if auth_resp.status_code == 200:
auth_resp_data = self.parse_raw_response(auth_resp)
self.__token = auth_resp_data['token']
self.__auth_time = datetime.now()
self.is_authenticated = True
else:
raise AuthenticationFailedException('Authentication failed!')
@authentication_required
def search_series(self, name=None, imdb_id=None, zap2it_id=None):
"""
Searches for a series in TheTVDB by either its name, imdb_id or zap2it_id.
:param name: the name of the series to look for
:param imdb_id: the IMDB id of the series to look for
:param zap2it_id: the zap2it id of the series to look for.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
arguments = locals()
optional_parameters = {'name': 'name', 'imdb_id': 'imdbId', 'zap2it_id': 'zap2itId'}
query_string = utils.query_param_string_from_option_args(optional_parameters, arguments)
raw_response = requests_util.run_request('get', '%s%s?%s' % (self.API_BASE_URL, '/search/series',
query_string),
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series(self, series_id):
"""
Retrieves the information of a series from TheTVDB given the series ID.
:param series_id: the id of the series on TheTVDB.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d' % series_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series_actors(self, series_id):
"""
Retrieves the information on the actors of a particular series, given its TheTVDB id.
:param series_id: the TheTVDB id of the series
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/actors' % series_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series_episodes(self, series_id, page=1):
"""
Retrieves all episodes for a particular series given its TheTVDB id. It retrieves a maximum of 100 results per
page.
:param series_id: The TheTVDB id of the series.
:param page: The page number. If none is provided, 1 is used by default.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/episodes?page=%d' %
(series_id, page), headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series_episodes(self, series_id, episode_number=None, aired_season=None, aired_episode=None,
dvd_season=None, dvd_episode=None, imdb_id=None, page=1):
"""
Retrieves all episodes for a particular series given its TheTVDB and filtered by additional optional details.
:param series_id: The TheTVDB id of the series.
:param episode_number: The optional absolute episode number.
:param aired_season: The optional aired season number.
:param aired_episode: The optional aired episode number.
:param dvd_season: The optional DVD season number.
:param dvd_episode: The optional DVD episode number.
:param imdb_id: The optional IMDB Id of the series.
:param page: The page number. If none is provided, 1 is used by default.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
arguments = locals()
optional_parameters = {'episode_number': 'absoluteNumber', 'aired_season': 'airedSeason',
'aired_episode': 'airedEpisode', 'dvd_season': 'dvdSeason', 'dvd_episode': 'dvdEpisode',
'imdb_id': 'imdbId', 'page': 'page'}
query_string = utils.query_param_string_from_option_args(optional_parameters, arguments)
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/episodes/query?%s' %
(series_id, query_string), headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series_episodes_summary(self, series_id):
"""
Retrieves the summary of the episodes and seasons of a series given its TheTVDB id.
:param series_id: The TheTVDB id of the series.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/episodes/summary' % series_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def __get_series_images(self, series_id):
"""
Retrieves the url to images (posters, fanart) for the series, seasons and episodes of a series given its
TheTVDB id.
:param series_id: The TheTVDB id of the series.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/images' % series_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series_images(self, series_id, image_type=None, resolution=None, sub_key=None):
"""
Retrieves the url to images (posters, fanart) for the series, seasons and episodes of a series given its
TheTVDB id and filtered by additional parameters.
:param series_id: The TheTVDB id of the series.
:param image_type: The optional type of image: posters, fanart, thumbnail, etc.
:param resolution: The optional resolution: i.e. 1280x1024
:param sub_key: The optional subkey: graphical, text.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
arguments = locals()
optional_parameters = {'image_type': 'keyType', 'resolution': 'resolution', 'sub_key': 'subKey'}
query_string = utils.query_param_string_from_option_args(optional_parameters, arguments)
if len(query_string):
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/images/query?%s' %
(series_id, query_string), headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
else:
return self.__get_series_images(series_id)
@authentication_required
def get_updated(self, from_time, to_time=None):
"""
Retrieves a list of series that have changed on TheTVDB since a provided from time parameter and optionally to a
specified to time.
:param from_time: An epoch representation of the date from which to restrict the query to.
:param to_time: An optional epoch representation of the date to which to restrict the query to.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
arguments = locals()
optional_parameters = {'to_time': 'toTime'}
query_string = 'fromTime=%s&%s' % (from_time,
utils.query_param_string_from_option_args(optional_parameters, arguments))
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/uodated/query?%s' % query_string,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_user(self):
"""
Retrieves information about the user currently using the api.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('get', self.API_BASE_URL + '/user',
headers=self.__get_header_with_auth()))
@authentication_required
def get_user_favorites(self):
"""
Retrieves the list of tv series the current user has flagged as favorite.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('get', self.API_BASE_URL + '/user/favorites',
headers=self.__get_header_with_auth()))
@authentication_required
def delete_user_favorite(self, series_id):
"""
Deletes the series of the provided id from the favorites list of the current user.
:param series_id: The TheTVDB id of the series.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('delete',
self.API_BASE_URL + '/user/favorites/%d' % series_id,
headers=self.__get_header_with_auth()))
@authentication_required
def add_user_favorite(self, series_id):
"""
Adds the series related to the series id provided to the list of favorites of the current user.
:param series_id: The TheTVDB id of the series.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('put',
self.API_BASE_URL + '/user/favorites/%d' % series_id,
headers=self.__get_header_with_auth()))
@authentication_required
def __get_user_ratings(self):
"""
Returns a list of the ratings provided by the current user.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('get', self.API_BASE_URL + '/user/ratings',
headers=self.__get_header_with_auth()))
@authentication_required
def get_user_ratings(self, item_type=None):
"""
Returns a list of the ratings for the type of item provided, for the current user.
:param item_type: One of: series, episode or banner.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
if item_type:
query_string = 'itemType=%s' % item_type
return self.parse_raw_response(
requests_util.run_request('get', self.API_BASE_URL + '/user/ratings/qeury?%s' % query_string,
headers=self.__get_header_with_auth()))
else:
return self.__get_user_ratings()
@authentication_required
def add_user_rating(self, item_type, item_id, item_rating):
"""
Adds the rating for the item indicated for the current user.
:param item_type: One of: series, episode, banner.
:param item_id: The TheTVDB id of the item.
:param item_rating: The rating from 0 to 10.
:return:
"""
raw_response = requests_util.run_request('put',
self.API_BASE_URL + '/user/ratings/%s/%d/%d' %
(item_type, item_id, item_rating),
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def delete_user_rating(self, item_type, item_id):
"""
Deletes from the list of rating of the current user, the rating provided for the specified element type.
:param item_type: One of: series, episode, banner.
:param item_id: The TheTVDB Id of the item.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('delete',
self.API_BASE_URL + '/user/ratings/%s/%d' %
(item_type, item_id), headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
@authentication_required
def get_languages(self):
"""
Returns a list of all language options available in TheTVDB.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/languages',
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_language(self, language_id):
"""
Retrieves information about the language of the given id.
:param language_id: The TheTVDB Id of the language.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/languages/%d' % language_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
|
thilux/tvdb_client | tvdb_client/clients/ApiV2Client.py | ApiV2Client.get_languages | python | def get_languages(self):
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/languages',
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response) | Returns a list of all language options available in TheTVDB.
:return: a python dictionary with either the result of the search or an error from TheTVDB. | train | https://github.com/thilux/tvdb_client/blob/2d5106f260367c0abe1284683697874df6343f78/tvdb_client/clients/ApiV2Client.py#L399-L409 | [
"def run_request(request_type, url, retries=5, data=None, headers=None):\n func = __request_factory(request_type)\n\n for attempt in range(retries+1):\n try:\n response = func(url, data=data, headers=headers)\n return response\n except RequestException:\n warnings.warn('Got error on request for attemp %d - %s' %\n (attempt, 'retry is possible' if attempt < retries else 'no retry'))\n return None\n",
"def __get_header_with_auth(self):\n \"\"\"\n This private method returns the HTTP heder filled with the Authorization information with the user token.\n The token validity is monitored whenever this function is called, so according to the swagger page of TheTVDB\n (https://api.thetvdb.com/swagger) the tokens are valid for 24 hours, therefore if a token is generated for over\n 23 hours already, this function will also perform a token refresh using TheTVDB refresh_token API. If over 24\n hours have passed since the token generation, a login is performed to generate a new one, instead.\n\n :return: A python dictionary representing the HTTP header to be used in TheTVDB API calls.\n \"\"\"\n auth_header = self.__get_header()\n auth_header['Authorization'] = 'Bearer %s' % self.__token\n token_renew_time = self.__auth_time + timedelta(seconds=self.TOKEN_DURATION_SECONDS)\n\n if datetime.now() > token_renew_time:\n token_max_time = self.__auth_time + timedelta(seconds=self.TOKEN_MAX_DURATION)\n if datetime.now() < token_max_time:\n self.__refresh_token()\n else:\n self.login()\n\n auth_header['Authorization'] = 'Bearer %s' % self.__token\n\n return auth_header\n",
"def parse_raw_response(self, raw_response):\n\n if raw_response.status_code == 200:\n return json.loads(make_str_content(raw_response.content))\n else:\n return self.__handle_error(raw_response)\n"
] | class ApiV2Client(BaseClient):
"""
This is the python library implementation of the TheTVDB API V2. Details of the APIs is documented in the swagger
page maintained by TheTVDB that can be found on this address: https://api.thetvdb.com/swagger
The only APIs not represented by methods in this library are the ones that only returns parameters information of
the actual API.
"""
API_BASE_URL = 'https://api.thetvdb.com'
TOKEN_DURATION_SECONDS = 23 * 3600 # 23 Hours
TOKEN_MAX_DURATION = 24 * 3600 # 24 Hours
def __init__(self, username, api_key, account_identifier, language=None):
self.username = username
self.api_key = api_key
self.account_identifier = account_identifier
self.is_authenticated = False
self.__token = None
self.__auth_time = 0
self.language = language
def __get_header(self):
header = dict()
header['Content-Type'] = 'application/json'
header['User-Agent'] = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:47.0) Gecko/20100101 Firefox/47.0'
if self.language:
header['Accept-Language'] = self.language
return header
def __refresh_token(self):
headers = self.__get_header()
headers['Authorization'] = 'Bearer %s' % self.__token
resp = requests_util.run_request('get', self.API_BASE_URL + '/refresh_token', headers=headers)
if resp.status_code == 200:
token_resp = self.parse_raw_response(resp)
self.__token = token_resp['token']
self.__auth_time = datetime.now()
def __get_header_with_auth(self):
"""
This private method returns the HTTP header filled with the Authorization information with the user token.
The token validity is monitored whenever this function is called, so according to the swagger page of TheTVDB
(https://api.thetvdb.com/swagger) the tokens are valid for 24 hours, therefore if a token is generated for over
23 hours already, this function will also perform a token refresh using TheTVDB refresh_token API. If over 24
hours have passed since the token generation, a login is performed to generate a new one, instead.
:return: A python dictionary representing the HTTP header to be used in TheTVDB API calls.
"""
auth_header = self.__get_header()
auth_header['Authorization'] = 'Bearer %s' % self.__token
token_renew_time = self.__auth_time + timedelta(seconds=self.TOKEN_DURATION_SECONDS)
if datetime.now() > token_renew_time:
token_max_time = self.__auth_time + timedelta(seconds=self.TOKEN_MAX_DURATION)
if datetime.now() < token_max_time:
self.__refresh_token()
else:
self.login()
auth_header['Authorization'] = 'Bearer %s' % self.__token
return auth_header
def login(self):
"""
This method performs the login on TheTVDB given the api key, user name and account identifier.
:return: None
"""
auth_data = dict()
auth_data['apikey'] = self.api_key
auth_data['username'] = self.username
auth_data['userkey'] = self.account_identifier
auth_resp = requests_util.run_request('post', self.API_BASE_URL + '/login', data=json.dumps(auth_data),
headers=self.__get_header())
if auth_resp.status_code == 200:
auth_resp_data = self.parse_raw_response(auth_resp)
self.__token = auth_resp_data['token']
self.__auth_time = datetime.now()
self.is_authenticated = True
else:
raise AuthenticationFailedException('Authentication failed!')
@authentication_required
def search_series(self, name=None, imdb_id=None, zap2it_id=None):
"""
Searches for a series in TheTVDB by either its name, imdb_id or zap2it_id.
:param name: the name of the series to look for
:param imdb_id: the IMDB id of the series to look for
:param zap2it_id: the zap2it id of the series to look for.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
arguments = locals()
optional_parameters = {'name': 'name', 'imdb_id': 'imdbId', 'zap2it_id': 'zap2itId'}
query_string = utils.query_param_string_from_option_args(optional_parameters, arguments)
raw_response = requests_util.run_request('get', '%s%s?%s' % (self.API_BASE_URL, '/search/series',
query_string),
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series(self, series_id):
"""
Retrieves the information of a series from TheTVDB given the series ID.
:param series_id: the id of the series on TheTVDB.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d' % series_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series_actors(self, series_id):
"""
Retrieves the information on the actors of a particular series, given its TheTVDB id.
:param series_id: the TheTVDB id of the series
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/actors' % series_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series_episodes(self, series_id, page=1):
    """
    Retrieves all episodes for a particular series given its TheTVDB id. It retrieves a maximum of 100 results
    per page.

    NOTE(review): this definition is dead code. Python has no method overloading, so the second
    ``get_series_episodes`` defined later in this class silently replaces this one at class-creation
    time. The later definition accepts the same ``(series_id, page=1)`` call shape but hits the
    ``/episodes/query`` endpoint instead of ``/episodes`` — the two should be merged or this one
    removed/renamed.

    :param series_id: The TheTVDB id of the series.
    :param page: The page number. If none is provided, 1 is used by default.
    :return: a python dictionary with either the result of the search or an error from TheTVDB.
    """
    raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/episodes?page=%d' %
                                             (series_id, page), headers=self.__get_header_with_auth())
    return self.parse_raw_response(raw_response)
@authentication_required
def get_series_episodes(self, series_id, episode_number=None, aired_season=None, aired_episode=None,
dvd_season=None, dvd_episode=None, imdb_id=None, page=1):
"""
Retrieves all episodes for a particular series given its TheTVDB and filtered by additional optional details.
:param series_id: The TheTVDB id of the series.
:param episode_number: The optional absolute episode number.
:param aired_season: The optional aired season number.
:param aired_episode: The optional aired episode number.
:param dvd_season: The optional DVD season number.
:param dvd_episode: The optional DVD episode number.
:param imdb_id: The optional IMDB Id of the series.
:param page: The page number. If none is provided, 1 is used by default.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
arguments = locals()
optional_parameters = {'episode_number': 'absoluteNumber', 'aired_season': 'airedSeason',
'aired_episode': 'airedEpisode', 'dvd_season': 'dvdSeason', 'dvd_episode': 'dvdEpisode',
'imdb_id': 'imdbId', 'page': 'page'}
query_string = utils.query_param_string_from_option_args(optional_parameters, arguments)
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/episodes/query?%s' %
(series_id, query_string), headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series_episodes_summary(self, series_id):
"""
Retrieves the summary of the episodes and seasons of a series given its TheTVDB id.
:param series_id: The TheTVDB id of the series.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/episodes/summary' % series_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def __get_series_images(self, series_id):
"""
Retrieves the url to images (posters, fanart) for the series, seasons and episodes of a series given its
TheTVDB id.
:param series_id: The TheTVDB id of the series.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/images' % series_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series_images(self, series_id, image_type=None, resolution=None, sub_key=None):
"""
Retrieves the url to images (posters, fanart) for the series, seasons and episodes of a series given its
TheTVDB id and filtered by additional parameters.
:param series_id: The TheTVDB id of the series.
:param image_type: The optional type of image: posters, fanart, thumbnail, etc.
:param resolution: The optional resolution: i.e. 1280x1024
:param sub_key: The optional subkey: graphical, text.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
arguments = locals()
optional_parameters = {'image_type': 'keyType', 'resolution': 'resolution', 'sub_key': 'subKey'}
query_string = utils.query_param_string_from_option_args(optional_parameters, arguments)
if len(query_string):
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/images/query?%s' %
(series_id, query_string), headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
else:
return self.__get_series_images(series_id)
@authentication_required
def get_updated(self, from_time, to_time=None):
    """
    Retrieves a list of series that have changed on TheTVDB since a provided from time parameter and
    optionally up to a specified to time.

    :param from_time: An epoch representation of the date from which to restrict the query to.
    :param to_time: An optional epoch representation of the date to which to restrict the query to.
    :return: a python dictionary with either the result of the search or an error from TheTVDB.
    """
    arguments = locals()
    optional_parameters = {'to_time': 'toTime'}
    query_string = 'fromTime=%s&%s' % (from_time,
                                       utils.query_param_string_from_option_args(optional_parameters, arguments))
    # BUG FIX: the endpoint was misspelled as '/uodated/query', which made every call 404 on TheTVDB.
    raw_response = requests_util.run_request('get', self.API_BASE_URL + '/updated/query?%s' % query_string,
                                             headers=self.__get_header_with_auth())
    return self.parse_raw_response(raw_response)
@authentication_required
def get_user(self):
"""
Retrieves information about the user currently using the api.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('get', self.API_BASE_URL + '/user',
headers=self.__get_header_with_auth()))
@authentication_required
def get_user_favorites(self):
"""
Retrieves the list of tv series the current user has flagged as favorite.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('get', self.API_BASE_URL + '/user/favorites',
headers=self.__get_header_with_auth()))
@authentication_required
def delete_user_favorite(self, series_id):
"""
Deletes the series of the provided id from the favorites list of the current user.
:param series_id: The TheTVDB id of the series.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('delete',
self.API_BASE_URL + '/user/favorites/%d' % series_id,
headers=self.__get_header_with_auth()))
@authentication_required
def add_user_favorite(self, series_id):
"""
Added the series related to the series id provided to the list of favorites of the current user.
:param series_id: The TheTVDB id of the series.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('put',
self.API_BASE_URL + '/user/favorites/%d' % series_id,
headers=self.__get_header_with_auth()))
@authentication_required
def __get_user_ratings(self):
"""
Returns a list of the ratings provided by the current user.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('get', self.API_BASE_URL + '/user/ratings',
headers=self.__get_header_with_auth()))
@authentication_required
def get_user_ratings(self, item_type=None):
    """
    Returns a list of the ratings for the type of item provided, for the current user.

    :param item_type: One of: series, episode or banner. If omitted, all ratings are returned.
    :return: a python dictionary with either the result of the search or an error from TheTVDB.
    """
    if item_type:
        query_string = 'itemType=%s' % item_type
        # BUG FIX: the endpoint was misspelled as '/user/ratings/qeury'.
        return self.parse_raw_response(
            requests_util.run_request('get', self.API_BASE_URL + '/user/ratings/query?%s' % query_string,
                                      headers=self.__get_header_with_auth()))
    else:
        return self.__get_user_ratings()
@authentication_required
def add_user_rating(self, item_type, item_id, item_rating):
"""
Adds the rating for the item indicated for the current user.
:param item_type: One of: series, episode, banner.
:param item_id: The TheTVDB id of the item.
:param item_rating: The rating from 0 to 10.
:return:
"""
raw_response = requests_util.run_request('put',
self.API_BASE_URL + '/user/ratings/%s/%d/%d' %
(item_type, item_id, item_rating),
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def delete_user_rating(self, item_type, item_id):
"""
Deletes from the list of rating of the current user, the rating provided for the specified element type.
:param item_type: One of: series, episode, banner.
:param item_id: The TheTVDB Id of the item.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('delete',
self.API_BASE_URL + '/user/ratings/%s/%d' %
(item_type, item_id), headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_episode(self, episode_id):
"""
Returns the full information of the episode belonging to the Id provided.
:param episode_id: The TheTVDB id of the episode.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/episodes/%d' % episode_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
# FIX: the @authentication_required decorator was accidentally applied twice; applying it once is sufficient.
@authentication_required
def get_language(self, language_id):
    """
    Retrieves information about the language of the given id.

    :param language_id: The TheTVDB Id of the language.
    :return: a python dictionary with either the result of the search or an error from TheTVDB.
    """
    raw_response = requests_util.run_request('get', self.API_BASE_URL + '/languages/%d' % language_id,
                                             headers=self.__get_header_with_auth())
    return self.parse_raw_response(raw_response)
|
thilux/tvdb_client | tvdb_client/clients/ApiV2Client.py | ApiV2Client.get_language | python | def get_language(self, language_id):
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/languages/%d' % language_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response) | Retrieves information about the language of the given id.
:param language_id: The TheTVDB Id of the language.
:return: a python dictionary with either the result of the search or an error from TheTVDB. | train | https://github.com/thilux/tvdb_client/blob/2d5106f260367c0abe1284683697874df6343f78/tvdb_client/clients/ApiV2Client.py#L412-L423 | [
"def run_request(request_type, url, retries=5, data=None, headers=None):\n func = __request_factory(request_type)\n\n for attempt in range(retries+1):\n try:\n response = func(url, data=data, headers=headers)\n return response\n except RequestException:\n warnings.warn('Got error on request for attemp %d - %s' %\n (attempt, 'retry is possible' if attempt < retries else 'no retry'))\n return None\n",
"def __get_header_with_auth(self):\n \"\"\"\n This private method returns the HTTP heder filled with the Authorization information with the user token.\n The token validity is monitored whenever this function is called, so according to the swagger page of TheTVDB\n (https://api.thetvdb.com/swagger) the tokens are valid for 24 hours, therefore if a token is generated for over\n 23 hours already, this function will also perform a token refresh using TheTVDB refresh_token API. If over 24\n hours have passed since the token generation, a login is performed to generate a new one, instead.\n\n :return: A python dictionary representing the HTTP header to be used in TheTVDB API calls.\n \"\"\"\n auth_header = self.__get_header()\n auth_header['Authorization'] = 'Bearer %s' % self.__token\n token_renew_time = self.__auth_time + timedelta(seconds=self.TOKEN_DURATION_SECONDS)\n\n if datetime.now() > token_renew_time:\n token_max_time = self.__auth_time + timedelta(seconds=self.TOKEN_MAX_DURATION)\n if datetime.now() < token_max_time:\n self.__refresh_token()\n else:\n self.login()\n\n auth_header['Authorization'] = 'Bearer %s' % self.__token\n\n return auth_header\n",
"def parse_raw_response(self, raw_response):\n\n if raw_response.status_code == 200:\n return json.loads(make_str_content(raw_response.content))\n else:\n return self.__handle_error(raw_response)\n"
] | class ApiV2Client(BaseClient):
"""
This is the python library implementation of the TheTVDB API V2. Details of the APIs is documented in the swagger
page maintained by TheTVDB that can be found on this address: https://api.thetvdb.com/swagger
The only APIs not represented by methods in this library are the ones that only returns parameters information of
the actual API.
"""
API_BASE_URL = 'https://api.thetvdb.com'
TOKEN_DURATION_SECONDS = 23 * 3600 # 23 Hours
TOKEN_MAX_DURATION = 24 * 3600 # 24 Hours
def __init__(self, username, api_key, account_identifier, language=None):
self.username = username
self.api_key = api_key
self.account_identifier = account_identifier
self.is_authenticated = False
self.__token = None
self.__auth_time = 0
self.language = language
def __get_header(self):
header = dict()
header['Content-Type'] = 'application/json'
header['User-Agent'] = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:47.0) Gecko/20100101 Firefox/47.0'
if self.language:
header['Accept-Language'] = self.language
return header
def __refresh_token(self):
headers = self.__get_header()
headers['Authorization'] = 'Bearer %s' % self.__token
resp = requests_util.run_request('get', self.API_BASE_URL + '/refresh_token', headers=headers)
if resp.status_code == 200:
token_resp = self.parse_raw_response(resp)
self.__token = token_resp['token']
self.__auth_time = datetime.now()
def __get_header_with_auth(self):
"""
This private method returns the HTTP heder filled with the Authorization information with the user token.
The token validity is monitored whenever this function is called, so according to the swagger page of TheTVDB
(https://api.thetvdb.com/swagger) the tokens are valid for 24 hours, therefore if a token is generated for over
23 hours already, this function will also perform a token refresh using TheTVDB refresh_token API. If over 24
hours have passed since the token generation, a login is performed to generate a new one, instead.
:return: A python dictionary representing the HTTP header to be used in TheTVDB API calls.
"""
auth_header = self.__get_header()
auth_header['Authorization'] = 'Bearer %s' % self.__token
token_renew_time = self.__auth_time + timedelta(seconds=self.TOKEN_DURATION_SECONDS)
if datetime.now() > token_renew_time:
token_max_time = self.__auth_time + timedelta(seconds=self.TOKEN_MAX_DURATION)
if datetime.now() < token_max_time:
self.__refresh_token()
else:
self.login()
auth_header['Authorization'] = 'Bearer %s' % self.__token
return auth_header
def login(self):
"""
This method performs the login on TheTVDB given the api key, user name and account identifier.
:return: None
"""
auth_data = dict()
auth_data['apikey'] = self.api_key
auth_data['username'] = self.username
auth_data['userkey'] = self.account_identifier
auth_resp = requests_util.run_request('post', self.API_BASE_URL + '/login', data=json.dumps(auth_data),
headers=self.__get_header())
if auth_resp.status_code == 200:
auth_resp_data = self.parse_raw_response(auth_resp)
self.__token = auth_resp_data['token']
self.__auth_time = datetime.now()
self.is_authenticated = True
else:
raise AuthenticationFailedException('Authentication failed!')
@authentication_required
def search_series(self, name=None, imdb_id=None, zap2it_id=None):
"""
Searches for a series in TheTVDB by either its name, imdb_id or zap2it_id.
:param name: the name of the series to look for
:param imdb_id: the IMDB id of the series to look for
:param zap2it_id: the zap2it id of the series to look for.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
arguments = locals()
optional_parameters = {'name': 'name', 'imdb_id': 'imdbId', 'zap2it_id': 'zap2itId'}
query_string = utils.query_param_string_from_option_args(optional_parameters, arguments)
raw_response = requests_util.run_request('get', '%s%s?%s' % (self.API_BASE_URL, '/search/series',
query_string),
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series(self, series_id):
"""
Retrieves the information of a series from TheTVDB given the series ID.
:param series_id: the id of the series on TheTVDB.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d' % series_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series_actors(self, series_id):
"""
Retrieves the information on the actors of a particular series, given its TheTVDB id.
:param series_id: the TheTVDB id of the series
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/actors' % series_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series_episodes(self, series_id, page=1):
"""
Retrieves all episodes for a particular series given its TheTVDB id. It retrieves a maximum of 100 results per
page.
:param series_id: The TheTVDB id of the series.
:param page: The page number. If none is provided, 1 is used by default.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/episodes?page=%d' %
(series_id, page), headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series_episodes(self, series_id, episode_number=None, aired_season=None, aired_episode=None,
dvd_season=None, dvd_episode=None, imdb_id=None, page=1):
"""
Retrieves all episodes for a particular series given its TheTVDB and filtered by additional optional details.
:param series_id: The TheTVDB id of the series.
:param episode_number: The optional absolute episode number.
:param aired_season: The optional aired season number.
:param aired_episode: The optional aired episode number.
:param dvd_season: The optional DVD season number.
:param dvd_episode: The optional DVD episode number.
:param imdb_id: The optional IMDB Id of the series.
:param page: The page number. If none is provided, 1 is used by default.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
arguments = locals()
optional_parameters = {'episode_number': 'absoluteNumber', 'aired_season': 'airedSeason',
'aired_episode': 'airedEpisode', 'dvd_season': 'dvdSeason', 'dvd_episode': 'dvdEpisode',
'imdb_id': 'imdbId', 'page': 'page'}
query_string = utils.query_param_string_from_option_args(optional_parameters, arguments)
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/episodes/query?%s' %
(series_id, query_string), headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series_episodes_summary(self, series_id):
"""
Retrieves the summary of the episodes and seasons of a series given its TheTVDB id.
:param series_id: The TheTVDB id of the series.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/episodes/summary' % series_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def __get_series_images(self, series_id):
"""
Retrieves the url to images (posters, fanart) for the series, seasons and episodes of a series given its
TheTVDB id.
:param series_id: The TheTVDB id of the series.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/images' % series_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_series_images(self, series_id, image_type=None, resolution=None, sub_key=None):
"""
Retrieves the url to images (posters, fanart) for the series, seasons and episodes of a series given its
TheTVDB id and filtered by additional parameters.
:param series_id: The TheTVDB id of the series.
:param image_type: The optional type of image: posters, fanart, thumbnail, etc.
:param resolution: The optional resolution: i.e. 1280x1024
:param sub_key: The optional subkey: graphical, text.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
arguments = locals()
optional_parameters = {'image_type': 'keyType', 'resolution': 'resolution', 'sub_key': 'subKey'}
query_string = utils.query_param_string_from_option_args(optional_parameters, arguments)
if len(query_string):
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d/images/query?%s' %
(series_id, query_string), headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
else:
return self.__get_series_images(series_id)
@authentication_required
def get_updated(self, from_time, to_time=None):
    """
    Retrieves a list of series that have changed on TheTVDB since a provided from time parameter and
    optionally up to a specified to time.

    :param from_time: An epoch representation of the date from which to restrict the query to.
    :param to_time: An optional epoch representation of the date to which to restrict the query to.
    :return: a python dictionary with either the result of the search or an error from TheTVDB.
    """
    arguments = locals()
    optional_parameters = {'to_time': 'toTime'}
    query_string = 'fromTime=%s&%s' % (from_time,
                                       utils.query_param_string_from_option_args(optional_parameters, arguments))
    # BUG FIX: the endpoint was misspelled as '/uodated/query', which made every call 404 on TheTVDB.
    raw_response = requests_util.run_request('get', self.API_BASE_URL + '/updated/query?%s' % query_string,
                                             headers=self.__get_header_with_auth())
    return self.parse_raw_response(raw_response)
@authentication_required
def get_user(self):
"""
Retrieves information about the user currently using the api.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('get', self.API_BASE_URL + '/user',
headers=self.__get_header_with_auth()))
@authentication_required
def get_user_favorites(self):
"""
Retrieves the list of tv series the current user has flagged as favorite.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('get', self.API_BASE_URL + '/user/favorites',
headers=self.__get_header_with_auth()))
@authentication_required
def delete_user_favorite(self, series_id):
"""
Deletes the series of the provided id from the favorites list of the current user.
:param series_id: The TheTVDB id of the series.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('delete',
self.API_BASE_URL + '/user/favorites/%d' % series_id,
headers=self.__get_header_with_auth()))
@authentication_required
def add_user_favorite(self, series_id):
"""
Added the series related to the series id provided to the list of favorites of the current user.
:param series_id: The TheTVDB id of the series.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('put',
self.API_BASE_URL + '/user/favorites/%d' % series_id,
headers=self.__get_header_with_auth()))
@authentication_required
def __get_user_ratings(self):
"""
Returns a list of the ratings provided by the current user.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
return self.parse_raw_response(requests_util.run_request('get', self.API_BASE_URL + '/user/ratings',
headers=self.__get_header_with_auth()))
@authentication_required
def get_user_ratings(self, item_type=None):
    """
    Returns a list of the ratings for the type of item provided, for the current user.

    :param item_type: One of: series, episode or banner. If omitted, all ratings are returned.
    :return: a python dictionary with either the result of the search or an error from TheTVDB.
    """
    if item_type:
        query_string = 'itemType=%s' % item_type
        # BUG FIX: the endpoint was misspelled as '/user/ratings/qeury'.
        return self.parse_raw_response(
            requests_util.run_request('get', self.API_BASE_URL + '/user/ratings/query?%s' % query_string,
                                      headers=self.__get_header_with_auth()))
    else:
        return self.__get_user_ratings()
@authentication_required
def add_user_rating(self, item_type, item_id, item_rating):
"""
Adds the rating for the item indicated for the current user.
:param item_type: One of: series, episode, banner.
:param item_id: The TheTVDB id of the item.
:param item_rating: The rating from 0 to 10.
:return:
"""
raw_response = requests_util.run_request('put',
self.API_BASE_URL + '/user/ratings/%s/%d/%d' %
(item_type, item_id, item_rating),
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def delete_user_rating(self, item_type, item_id):
"""
Deletes from the list of rating of the current user, the rating provided for the specified element type.
:param item_type: One of: series, episode, banner.
:param item_id: The TheTVDB Id of the item.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('delete',
self.API_BASE_URL + '/user/ratings/%s/%d' %
(item_type, item_id), headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_episode(self, episode_id):
"""
Returns the full information of the episode belonging to the Id provided.
:param episode_id: The TheTVDB id of the episode.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/episodes/%d' % episode_id,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
def get_languages(self):
"""
Returns a list of all language options available in TheTVDB.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
raw_response = requests_util.run_request('get', self.API_BASE_URL + '/languages',
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response)
@authentication_required
|
thilux/tvdb_client | tvdb_client/utils/utils.py | query_param_string_from_option_args | python | def query_param_string_from_option_args(a2q_dict, args_dict):
name_value_pairs = dict()
for ak in a2q_dict.keys():
value = args_dict[ak]
if value != None:
name_value_pairs[a2q_dict[ak]] = str(value)
return urllib.urlencode(name_value_pairs) | From a dictionary of arguments to query string parameters, loops through the arguments list and makes a query string.
:param a2q_dict: a dictionary containing argument_name > query string parameter name.
:param args_dict: a dictionary containing the argument_name > argument_value
:return: a query string. | train | https://github.com/thilux/tvdb_client/blob/2d5106f260367c0abe1284683697874df6343f78/tvdb_client/utils/utils.py#L6-L20 | null | # encoding=latin-1
__author__ = 'tsantana'
import urllib
def make_str_content(content):
    """
    Normalize an HTTP response payload to ``str``.

    Under Python 3, ``requests.Response.content`` is ``bytes``; decode it so
    callers always receive the old-style ``str`` text.

    :param content: requests.Response.content, either ``str`` or ``bytes``
    :return: str representation of the requests.Response.content data
    """
    if isinstance(content, str):
        return content
    return str(content.decode())
|
thilux/tvdb_client | tvdb_client/utils/utils.py | make_str_content | python | def make_str_content(content):
if not isinstance(content, str):
content = str(content.decode())
return content | In python3+ requests.Response.content returns bytes instead of ol'good str.
:param content: requests.Response.content
:return: str representation of the requests.Response.content data | train | https://github.com/thilux/tvdb_client/blob/2d5106f260367c0abe1284683697874df6343f78/tvdb_client/utils/utils.py#L23-L31 | null | # encoding=latin-1
__author__ = 'tsantana'
import urllib
def query_param_string_from_option_args(a2q_dict, args_dict):
    """
    From a dictionary of arguments to query string parameters, loops through the arguments list and
    builds a URL-encoded query string. Arguments whose value is None are omitted.

    :param a2q_dict: a dictionary containing argument_name > query string parameter name.
    :param args_dict: a dictionary containing the argument_name > argument_value.
    :return: a query string.
    """
    # BUG FIX: urllib.urlencode does not exist on Python 3 (it moved to urllib.parse).
    try:
        from urllib.parse import urlencode
    except ImportError:  # Python 2 fallback
        from urllib import urlencode

    name_value_pairs = {}
    for arg_name, param_name in a2q_dict.items():
        value = args_dict[arg_name]
        # 'is not None' instead of '!= None' (PEP 8); also keeps falsy-but-valid values such as 0 or ''.
        if value is not None:
            name_value_pairs[param_name] = str(value)
    return urlencode(name_value_pairs)
|
ChrisBeaumont/smother | smother/diff.py | parse_intervals | python | def parse_intervals(diff_report):
for patch in diff_report.patch_set:
try:
old_pf = diff_report.old_file(patch.source_file)
new_pf = diff_report.new_file(patch.target_file)
except InvalidPythonFile:
continue
for hunk in patch:
for line in hunk:
if line.line_type == LINE_TYPE_ADDED:
idx = line.target_line_no
yield ContextInterval(new_pf.filename, new_pf.context(idx))
elif line.line_type == LINE_TYPE_REMOVED:
idx = line.source_line_no
yield ContextInterval(old_pf.filename, old_pf.context(idx))
elif line.line_type in (LINE_TYPE_EMPTY, LINE_TYPE_CONTEXT):
pass
else:
raise AssertionError("Unexpected line type: %s" % line) | Parse a diff into an iterator of Intervals. | train | https://github.com/ChrisBeaumont/smother/blob/65d1ea6ae0060d213b0dcbb983c5aa8e7fee07bb/smother/diff.py#L31-L54 | null | """
Diff parser
"""
from abc import ABCMeta
from abc import abstractmethod
from abc import abstractproperty
import six
from functools import wraps
from more_itertools import unique_justseen
from unidiff.constants import LINE_TYPE_ADDED
from unidiff.constants import LINE_TYPE_CONTEXT
from unidiff.constants import LINE_TYPE_EMPTY
from unidiff.constants import LINE_TYPE_REMOVED
from smother.interval import ContextInterval
from smother.python import InvalidPythonFile
def dedup(func):
@wraps(func)
def wrapper(*args, **kwargs):
return unique_justseen(func(*args, **kwargs))
return wrapper
@dedup
@six.add_metaclass(ABCMeta)
class DiffReporter(object):
@abstractproperty
def patch_set(self):
pass
@abstractmethod
def old_file(self, path):
"""
Given an old file name from a diff report, return a PythonFile
"""
pass
@abstractmethod
def new_file(self, path):
"""
Given a new file name from a diff report, return a PythonFile
"""
pass
def changed_intervals(self):
return parse_intervals(self)
|
class Visitor(NodeVisitor):
    """
    Walk a module's ast. Build a `lines` list whose value at each
    index is a context block name for the corresponding source code line
    (entry ``i`` names the context of 1-based source line ``i + 1``).
    """

    def __init__(self, prefix=''):
        """
        Parameters
        ----------
        prefix : str
            The name to give to the module-level context.
        """
        self.line = 1  # which line (1-based) do we populate next?
        # a stack of nested class/function names
        self.context = []
        self.prefix = prefix
        self.current_context = prefix
        self.lines = []

    def _update_current_context(self):
        # Recompute the dotted context name from prefix + context stack.
        # Format is "<prefix>:<a.b.c>" when both pieces are non-empty.
        if self.prefix and self.context:
            self.current_context = self.prefix + ':' + '.'.join(self.context)
        elif self.prefix:
            self.current_context = self.prefix
        elif self.context:
            self.current_context = '.'.join(self.context)
        else:
            self.current_context = ''

    def _filldown(self, lineno):
        """
        Copy current_context into `lines` for every line up to
        (but not including) `lineno`.
        """
        if self.line > lineno:
            # XXX decorated functions make us jump backwards.
            # understand this more
            return
        self.lines.extend(
            self.current_context for _ in range(self.line, lineno))
        self.line = lineno

    def _add_section(self, node):
        """
        Register the current node as a new context block.
        """
        # Lines before the definition still belong to the old context.
        self._filldown(node.lineno)

        # push a new context onto stack
        self.context.append(node.name)
        self._update_current_context()

        # Was `for _ in map(self.visit, ...): pass` -- a map consumed only
        # for side effects. An explicit loop states the intent directly.
        for child in iter_child_nodes(node):
            self.visit(child)

        # restore current context
        self.context.pop()
        self._update_current_context()

    def generic_visit(self, node):
        if hasattr(node, 'lineno'):
            self._filldown(node.lineno + 1)
        for child in iter_child_nodes(node):
            self.visit(child)

    def visit_Module(self, node):  # noqa
        # need to manually insert one line for empty modules like __init__.py
        if not node.body:
            self.lines = [self.current_context]
        else:
            self.generic_visit(node)

    visit_ClassDef = _add_section
    visit_FunctionDef = _add_section
    visit_AsyncFunctionDef = _add_section
|
class Visitor(NodeVisitor):
    """
    Walk a module's ast, recording for every source line the name of the
    context block (module / class / function) that owns it.  The result
    accumulates in ``self.lines``: entry ``i`` names the context of the
    1-based source line ``i + 1``.
    """

    def __init__(self, prefix=''):
        """
        Parameters
        ----------
        prefix : str
            The name to give to the module-level context.
        """
        # next 1-based source line awaiting a context assignment
        self.line = 1
        # stack of enclosing class/function names
        self.context = []
        self.prefix = prefix
        self.current_context = prefix
        self.lines = []

    def _update_current_context(self):
        # Rebuild the dotted context name from the prefix and the stack.
        stacked = '.'.join(self.context)
        if self.prefix and stacked:
            self.current_context = '%s:%s' % (self.prefix, stacked)
        else:
            self.current_context = self.prefix or stacked

    def _filldown(self, lineno):
        """
        Assign ``current_context`` to every line up to (exclusive) `lineno`.
        """
        # XXX decorated functions make us jump backwards.
        # understand this more
        if lineno >= self.line:
            self.lines += [self.current_context] * (lineno - self.line)
            self.line = lineno

    def _add_section(self, node):
        """
        Register the current node as a new context block.
        """
        self._filldown(node.lineno)

        # enter the new context...
        self.context.append(node.name)
        self._update_current_context()

        # ...visit everything inside it...
        for child in iter_child_nodes(node):
            self.visit(child)

        # ...and leave it again.
        self.context.pop()
        self._update_current_context()

    def generic_visit(self, node):
        if hasattr(node, 'lineno'):
            self._filldown(node.lineno + 1)
        for child in iter_child_nodes(node):
            self.visit(child)

    def visit_Module(self, node):  # noqa
        if node.body:
            self.generic_visit(node)
        else:
            # empty modules (e.g. __init__.py) still get one entry
            self.lines = [self.current_context]

    visit_ClassDef = _add_section
    visit_FunctionDef = _add_section
    visit_AsyncFunctionDef = _add_section
|
class PythonFile(object):
    """
    A file of python source.
    """

    def __init__(self, filename, source=None, prefix=None):
        """
        Parameters
        ----------
        filename : str
            The path to the file
        source : str (optional)
            The contents of the file. Will be read from `filename`
            if not provided.
        prefix : str (optional)
            Name to give to the outermost context in the file.
            If not provided, will be the "." form of filename
            (ie a/b/c.py -> a.b.c)
        """
        self.filename = filename
        self.prefix = self._module_name(filename) if prefix is None else prefix

        if source is not None:
            self.source = source
        else:
            with open(filename) as infile:
                self.source = infile.read()

        try:
            self.ast = parse(self.source)
        except SyntaxError:
            raise InvalidPythonFile(self.filename)

        walker = Visitor(prefix=self.prefix)
        walker.visit(self.ast)
        self.lines = walker.lines

    @staticmethod
    def _module_name(filename):
        """
        Try to find a module name for a file path by stripping off
        the longest-first matching prefix found on sys.path.
        """
        abspath = os.path.abspath(filename)
        trimmed = filename
        for entry in [''] + sys.path:
            entry = os.path.abspath(entry)
            if abspath.startswith(entry):
                trimmed = abspath[len(entry):]
                break
        # NOTE(review): prefix match is textual, not path-component-aware;
        # an entry like /home/use could truncate /home/user/x.py -- confirm
        # whether that case matters in practice.
        return SUFFIX_RE.sub('', trimmed).lstrip('/').replace('/', '.')

    @classmethod
    def from_modulename(cls, module_name):
        """
        Build a PythonFile given a dotted module name like a.b.c
        """
        # XXX make this more robust (pyc files? zip archives? etc)
        slug = module_name.replace('.', '/')
        candidates = (slug + '.py', slug + '/__init__.py')
        # always search from current directory first
        for base in [''] + sys.path:
            for candidate in candidates:
                fullpath = os.path.join(base, candidate)
                if os.path.exists(fullpath):
                    return cls(fullpath, prefix=module_name)
        raise ValueError("Module not found: %s" % module_name)

    @property
    def line_count(self):
        return len(self.lines)

    def context_range(self, context):
        """
        Return the 1-offset, right-open range of lines spanned by
        a particular context name.

        Parameters
        ----------
        context : str

        Raises
        ------
        ValueError, if context is not present in the file.
        """
        if not context.startswith(self.prefix):
            context = self.prefix + '.' + context

        # context is hierarchical -- a context spans itself and any suffix
        matches = [idx for idx, line_context in enumerate(self.lines, 1)
                   if line_context.startswith(context)]
        if not matches:
            raise ValueError("Context %s does not exist in file %s" %
                             (context, self.filename))
        return matches[0], matches[-1] + 1

    def context(self, line):
        """
        Return the context for a given 1-offset line number.
        """
        # XXX due to a limitation in Visitor, non-python code after the
        # last python code in a file is not added to self.lines, so fall
        # back to the module prefix past the end.
        if line > len(self.lines):
            return self.prefix
        return self.lines[line - 1]
|
class PythonFile(object):
    """
    A file of python source.
    """

    def __init__(self, filename, source=None, prefix=None):
        """
        Parameters
        ----------
        filename : str
            The path to the file
        source : str (optional)
            The contents of the file. Read from `filename` when omitted.
        prefix : str (optional)
            Name to give to the outermost context in the file.
            Defaults to the dotted form of filename (a/b/c.py -> a.b.c).
        """
        self.filename = filename
        if prefix is not None:
            self.prefix = prefix
        else:
            self.prefix = self._module_name(filename)

        if source is None:
            with open(filename) as fh:
                source = fh.read()
        self.source = source

        try:
            self.ast = parse(self.source)
        except SyntaxError:
            raise InvalidPythonFile(self.filename)

        visitor = Visitor(prefix=self.prefix)
        visitor.visit(self.ast)
        self.lines = visitor.lines

    @staticmethod
    def _module_name(filename):
        """
        Try to find a module name for a file path by stripping off a
        prefix found on sys.path.
        """
        target = os.path.abspath(filename)
        relative = filename
        for root in [''] + sys.path:
            root = os.path.abspath(root)
            if target.startswith(root):
                relative = target[len(root):]
                break
        return SUFFIX_RE.sub('', relative).lstrip('/').replace('/', '.')

    @classmethod
    def from_modulename(cls, module_name):
        """
        Build a PythonFile given a dotted module name like a.b.c
        """
        # XXX make this more robust (pyc files? zip archives? etc)
        relpath = module_name.replace('.', '/')
        # always search from current directory first
        for base in [''] + sys.path:
            for suffix in ('.py', '/__init__.py'):
                fullpath = os.path.join(base, relpath + suffix)
                if os.path.exists(fullpath):
                    return cls(fullpath, prefix=module_name)
        raise ValueError("Module not found: %s" % module_name)

    @property
    def line_count(self):
        return len(self.lines)

    def context_range(self, context):
        """
        Return the 1-offset, right-open range of lines spanned by
        a particular context name.

        Parameters
        ----------
        context : str

        Raises
        ------
        ValueError, if context is not present in the file.
        """
        if not context.startswith(self.prefix):
            context = self.prefix + '.' + context

        first = last = None
        lineno = 0
        for line_context in self.lines:
            lineno += 1
            # context is hierarchical -- a context spans itself
            # and any of its children
            if line_context.startswith(context):
                if first is None:
                    first = lineno
                last = lineno

        if first is None:
            raise ValueError("Context %s does not exist in file %s" %
                             (context, self.filename))
        return first, last + 1

    def context(self, line):
        """
        Return the context for a given 1-offset line number.
        """
        # XXX due to a limitation in Visitor, non-python lines after the
        # last python statement are absent from self.lines, so guard
        # against running off the end.
        idx = line - 1
        return self.lines[idx] if idx < len(self.lines) else self.prefix
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.